/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/irqchip/mips-gic.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/hotplug.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>

#include <asm/bcache.h>
#include <asm/mips-cm.h>
#include <asm/mips-cpc.h>
#include <asm/mips_mt.h>
#include <asm/mipsregs.h>
#include <asm/pm-cps.h>
#include <asm/r4kcache.h>
#include <asm/smp-cps.h>
#include <asm/time.h>
#include <asm/uasm.h>

static bool threads_disabled;
static DECLARE_BITMAP(core_power, NR_CPUS);

struct core_boot_config *mips_cps_core_bootcfg;

static int __init setup_nothreads(char *s)
{
	threads_disabled = true;
	return 0;
}
early_param("nothreads", setup_nothreads);

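/*
 * Return the number of VPEs (or VPs on CM3 systems) implemented by a core,
 * read from the PVPE field of that core's GCR_Cx_CONFIG register. Returns 1
 * when multi-threading is disabled or unsupported.
 */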
static unsigned core_vpe_count(unsigned core)
{
	unsigned cfg;

	if (threads_disabled)
		return 1;

	if ((!IS_ENABLED(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt)
		&& (!IS_ENABLED(CONFIG_CPU_MIPSR6) || !cpu_has_vp))
		return 1;

	mips_cm_lock_other(core, 0);
	cfg = read_gcr_co_config() & CM_GCR_Cx_CONFIG_PVPE_MSK;
	mips_cm_unlock_other();
	return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
}

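/*
 * Probe the VPE topology reported by the CM, record it in cpu_data, mark
 * each VPE possible & present, then initialise core 0 (which we are running
 * on) and join it to the coherence domain.
 */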
static void __init cps_smp_setup(void)
{
	unsigned int ncores, nvpes, core_vpes;
	unsigned long core_entry;
	int c, v;

	/* Detect & record VPE topology */
	ncores = mips_cm_numcores();
	pr_info("%s topology ", cpu_has_mips_r6 ? "VP" : "VPE");
	for (c = nvpes = 0; c < ncores; c++) {
		core_vpes = core_vpe_count(c);
		pr_cont("%c%u", c ? ',' : '{', core_vpes);

		/* Use the number of VPEs in core 0 for smp_num_siblings */
		if (!c)
			smp_num_siblings = core_vpes;

		for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
			cpu_data[nvpes + v].core = c;
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6)
			cpu_data[nvpes + v].vpe_id = v;
#endif
		}

		nvpes += core_vpes;
	}
	pr_cont("} total %u\n", nvpes);

	/* Indicate present CPUs (CPU being synonymous with VPE) */
	for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
		set_cpu_possible(v, true);
		set_cpu_present(v, true);
		__cpu_number_map[v] = v;
		__cpu_logical_map[v] = v;
	}

	/* Set a coherent default CCA (CWB) */
	change_c0_config(CONF_CM_CMASK, 0x5);

	/* Core 0 is powered up (we're running on it) */
	bitmap_set(core_power, 0, 1);

	/* Initialise core 0 */
	mips_cps_core_init();

	/* Make core 0 coherent with everything */
	write_gcr_cl_coherence(0xff);

	if (mips_cm_revision() >= CM_REV_CM3) {
		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
		write_gcr_bev_base(core_entry);
	}

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(0, &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
}

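/*
 * Check that the cache coherency attribute we are running with is suitable
 * for multi-core SMP, patch the secondary entry code with that CCA, and
 * allocate the per-core & per-VPE boot configuration structures.
 */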
static void __init cps_prepare_cpus(unsigned int max_cpus)
{
	unsigned ncores, core_vpes, c, cca;
	bool cca_unsuitable;
	u32 *entry_code;

	mips_mt_set_cpuoptions();

	/* Detect whether the CCA is unsuited to multi-core SMP */
	cca = read_c0_config() & CONF_CM_CMASK;
	switch (cca) {
	case 0x4: /* CWBE */
	case 0x5: /* CWB */
		/* The CCA is coherent, multi-core is fine */
		cca_unsuitable = false;
		break;

	default:
		/* CCA is not coherent, multi-core is not usable */
		cca_unsuitable = true;
	}

	/* Warn the user if the CCA prevents multi-core */
	ncores = mips_cm_numcores();
	if (cca_unsuitable && ncores > 1) {
		pr_warn("Using only one core due to unsuitable CCA 0x%x\n",
			cca);

		for_each_present_cpu(c) {
			if (cpu_data[c].core)
				set_cpu_present(c, false);
		}
	}

	/*
	 * Patch the start of mips_cps_core_entry to provide:
	 *
	 * s0 = kseg0 CCA
	 */
	entry_code = (u32 *)&mips_cps_core_entry;
	uasm_i_addiu(&entry_code, 16, 0, cca);
	blast_dcache_range((unsigned long)&mips_cps_core_entry,
			   (unsigned long)entry_code);
	bc_wback_inv((unsigned long)&mips_cps_core_entry,
		     (void *)entry_code - (void *)&mips_cps_core_entry);
	__sync();

	/* Allocate core boot configuration structs */
	mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
					GFP_KERNEL);
	if (!mips_cps_core_bootcfg) {
		pr_err("Failed to allocate boot config for %u cores\n", ncores);
		goto err_out;
	}

	/* Allocate VPE boot configuration structs */
	for (c = 0; c < ncores; c++) {
		core_vpes = core_vpe_count(c);
		mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes,
				sizeof(*mips_cps_core_bootcfg[c].vpe_config),
				GFP_KERNEL);
		if (!mips_cps_core_bootcfg[c].vpe_config) {
			pr_err("Failed to allocate %u VPE boot configs\n",
			       core_vpes);
			goto err_out;
		}
	}

	/* Mark this CPU as booted */
	atomic_set(&mips_cps_core_bootcfg[current_cpu_data.core].vpe_mask,
		   1 << cpu_vpe_id(&current_cpu_data));

	return;
err_out:
	/* Clean up allocations */
	if (mips_cps_core_bootcfg) {
		for (c = 0; c < ncores; c++)
			kfree(mips_cps_core_bootcfg[c].vpe_config);
		kfree(mips_cps_core_bootcfg);
		mips_cps_core_bootcfg = NULL;
	}

	/* Effectively disable SMP by declaring CPUs not present */
	for_each_possible_cpu(c) {
		if (c == 0)
			continue;
		set_cpu_present(c, false);
	}
}

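/*
 * Power up & reset a core so that it starts executing from
 * mips_cps_core_entry. With a CPC present we issue a reset command and wait
 * for the core to reach coherent execution (U6); without one we simply
 * release the core from reset via the GCRs.
 */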
static void boot_core(unsigned int core, unsigned int vpe_id)
{
	u32 access, stat, seq_state;
	unsigned timeout;

	/* Select the appropriate core */
	mips_cm_lock_other(core, 0);

	/* Set its reset vector */
	write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));

	/* Ensure its coherency is disabled */
	write_gcr_co_coherence(0);

	/* Start it with the legacy memory map and exception base */
	write_gcr_co_reset_ext_base(CM_GCR_RESET_EXT_BASE_UEB);

	/* Ensure the core can access the GCRs */
	access = read_gcr_access();
	access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + core);
	write_gcr_access(access);

	if (mips_cpc_present()) {
		/* Reset the core */
		mips_cpc_lock_other(core);

		if (mips_cm_revision() >= CM_REV_CM3) {
			/* Run only the requested VP following the reset */
			write_cpc_co_vp_stop(0xf);
			write_cpc_co_vp_run(1 << vpe_id);

			/*
			 * Ensure that the VP_RUN register is written before the
			 * core leaves reset.
			 */
			wmb();
		}

		write_cpc_co_cmd(CPC_Cx_CMD_RESET);

		timeout = 100;
		while (true) {
			stat = read_cpc_co_stat_conf();
			seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE_MSK;

			/* U6 == coherent execution, ie. the core is up */
			if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U6)
				break;

			/* Delay a little while before we start warning */
			if (timeout) {
				timeout--;
				mdelay(10);
				continue;
			}

			pr_warn("Waiting for core %u to start... STAT_CONF=0x%x\n",
				core, stat);
			mdelay(1000);
		}

		mips_cpc_unlock_other();
	} else {
		/* Take the core out of reset */
		write_gcr_co_reset_release(0);
	}

	mips_cm_unlock_other();

	/* The core is now powered up */
	bitmap_set(core_power, core, 1);
}

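/*
 * Runs on a CPU within the target core (via smp_call_function_single()) in
 * order to start the VPE(s) pending in that core's boot configuration.
 */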
static void remote_vpe_boot(void *dummy)
{
	unsigned core = current_cpu_data.core;
	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];

	mips_cps_boot_vpes(core_cfg, cpu_vpe_id(&current_cpu_data));
}

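/*
 * Bring up a secondary CPU: record its entry pc/sp/gp in the VPE boot
 * config, then either power up its core, ask an online CPU sharing that core
 * to start it, or start it directly if it shares the calling CPU's core.
 */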
static void cps_boot_secondary(int cpu, struct task_struct *idle)
{
	unsigned core = cpu_data[cpu].core;
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
	struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
	unsigned long core_entry;
	unsigned int remote;
	int err;

	vpe_cfg->pc = (unsigned long)&smp_bootstrap;
	vpe_cfg->sp = __KSTK_TOS(idle);
	vpe_cfg->gp = (unsigned long)task_thread_info(idle);

	atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask);

	preempt_disable();

	if (!test_bit(core, core_power)) {
		/* Boot a VPE on a powered down core */
		boot_core(core, vpe_id);
		goto out;
	}

	if (cpu_has_vp) {
		mips_cm_lock_other(core, vpe_id);
		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
		write_gcr_co_reset_base(core_entry);
		mips_cm_unlock_other();
	}

	if (core != current_cpu_data.core) {
		/* Boot a VPE on another powered up core */
		for (remote = 0; remote < NR_CPUS; remote++) {
			if (cpu_data[remote].core != core)
				continue;
			if (cpu_online(remote))
				break;
		}
		if (remote >= NR_CPUS) {
			pr_crit("No online CPU in core %u to start CPU%d\n",
				core, cpu);
			goto out;
		}

		err = smp_call_function_single(remote, remote_vpe_boot,
					       NULL, 1);
		if (err)
			panic("Failed to call remote CPU\n");
		goto out;
	}

	BUG_ON(!cpu_has_mipsmt && !cpu_has_vp);

	/* Boot a VPE on this core */
	mips_cps_boot_vpes(core_cfg, vpe_id);
out:
	preempt_enable();
}

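/*
 * Early per-CPU initialisation on a freshly booted VPE: run only one TC per
 * VPE, sanity-check the GIC's view of our VP ID on CM3 systems, and unmask
 * the interrupt lines (IP2-IP7) unless an external interrupt controller is
 * in use.
 */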
static void cps_init_secondary(void)
{
	/* Disable MT - we only want to run 1 TC per VPE */
	if (cpu_has_mipsmt)
		dmt();

	if (mips_cm_revision() >= CM_REV_CM3) {
		unsigned ident = gic_read_local_vp_id();

		/*
		 * Ensure that our calculation of the VP ID matches up with
		 * what the GIC reports, otherwise we'll have configured
		 * interrupts incorrectly.
		 */
		BUG_ON(ident != mips_cm_vp_id(smp_processor_id()));
	}

	if (cpu_has_veic)
		clear_c0_status(ST0_IM);
	else
		change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 |
					 STATUSF_IP4 | STATUSF_IP5 |
					 STATUSF_IP6 | STATUSF_IP7);
}

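/*
 * Final step of bringing a secondary CPU online: arm the count/compare timer
 * for a first tick, join the FPU affinity mask if we have an FPU, and enable
 * interrupts.
 */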
static void cps_smp_finish(void)
{
	write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */

	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU

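/*
 * Check whether this CPU may go offline: CPU0 must stay online, and the
 * platform must support power-gating a core. On success, remove the CPU from
 * its core's VPE mask and mark it offline.
 */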
static int cps_cpu_disable(void)
{
	unsigned cpu = smp_processor_id();
	struct core_boot_config *core_cfg;

	if (!cpu)
		return -EBUSY;

	if (!cps_pm_support_state(CPS_PM_POWER_GATED))
		return -EINVAL;

	core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core];
	atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
	smp_mb__after_atomic();
	set_cpu_online(cpu, false);
	calculate_cpu_foreign_map();

	return 0;
}

static DECLARE_COMPLETION(cpu_death_chosen);
static unsigned cpu_death_sibling;
static enum {
	CPU_DEATH_HALT,
	CPU_DEATH_POWER,
} cpu_death;

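/*
 * Final code run by a CPU going offline: if another VPE within the core
 * remains online we merely halt this VPE; otherwise the whole core is
 * powered down.
 */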
void play_dead(void)
{
	unsigned int cpu, core, vpe_id;

	local_irq_disable();
	idle_task_exit();
	cpu = smp_processor_id();
	cpu_death = CPU_DEATH_POWER;

	pr_debug("CPU%d going offline\n", cpu);

	if (cpu_has_mipsmt || cpu_has_vp) {
		core = cpu_data[cpu].core;

		/* Look for another online VPE within the core */
		for_each_online_cpu(cpu_death_sibling) {
			if (cpu_data[cpu_death_sibling].core != core)
				continue;

			/*
			 * There is an online VPE within the core. Just halt
			 * this TC and leave the core alone.
			 */
			cpu_death = CPU_DEATH_HALT;
			break;
		}
	}

	/* This CPU has chosen its way out */
	complete(&cpu_death_chosen);

	if (cpu_death == CPU_DEATH_HALT) {
		vpe_id = cpu_vpe_id(&cpu_data[cpu]);

		pr_debug("Halting core %d VP%d\n", core, vpe_id);
		if (cpu_has_mipsmt) {
			/* Halt this TC */
			write_c0_tchalt(TCHALT_H);
			instruction_hazard();
		} else if (cpu_has_vp) {
			write_cpc_cl_vp_stop(1 << vpe_id);

			/* Ensure that the VP_STOP register is written */
			wmb();
		}
	} else {
		pr_debug("Gating power to core %d\n", core);
		/* Power down the core */
		cps_pm_enter_state(CPS_PM_POWER_GATED);
	}

	/* This should never be reached */
	panic("Failed to offline CPU %u", cpu);
}

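/*
 * Runs on a sibling CPU sharing the dying CPU's core: spin until the dying
 * CPU's TC reports itself halted via TCHalt.
 */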
static void wait_for_sibling_halt(void *ptr_cpu)
{
	unsigned cpu = (unsigned long)ptr_cpu;
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	unsigned halted;
	unsigned long flags;

	do {
		local_irq_save(flags);
		settc(vpe_id);
		halted = read_tc_c0_tchalt();
		local_irq_restore(flags);
	} while (!(halted & TCHALT_H));
}

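/*
 * Called from another CPU to reap the dying CPU: wait for it to choose its
 * exit path, then wait for its core to power down (or clock gate), its TC to
 * halt, or its VP to stop running, as appropriate.
 */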
static void cps_cpu_die(unsigned int cpu)
{
	unsigned core = cpu_data[cpu].core;
	unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	unsigned stat;
	int err;

	/* Wait for the cpu to choose its way out */
	if (!wait_for_completion_timeout(&cpu_death_chosen,
					 msecs_to_jiffies(5000))) {
		pr_err("CPU%u: didn't offline\n", cpu);
		return;
	}

	/*
	 * Now wait for the CPU to actually offline. Without doing this, the
	 * offlining may race with one or more of:
	 *
	 *   - Onlining the CPU again.
	 *   - Powering down the core if another VPE within it is offlined.
	 *   - A sibling VPE entering a non-coherent state.
	 *
	 * In the non-MT halt case (ie. infinite loop) the CPU is doing nothing
	 * with which we could race, so do nothing.
	 */
	if (cpu_death == CPU_DEATH_POWER) {
		/*
		 * Wait for the core to enter a powered down or clock gated
		 * state, the latter happening when a JTAG probe is connected
		 * in which case the CPC will refuse to power down the core.
		 */
		do {
			mips_cm_lock_other(core, 0);
			mips_cpc_lock_other(core);
			stat = read_cpc_co_stat_conf();
			stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK;
			mips_cpc_unlock_other();
			mips_cm_unlock_other();
		} while (stat != CPC_Cx_STAT_CONF_SEQSTATE_D0 &&
			 stat != CPC_Cx_STAT_CONF_SEQSTATE_D2 &&
			 stat != CPC_Cx_STAT_CONF_SEQSTATE_U2);

		/* Indicate the core is powered off */
		bitmap_clear(core_power, core, 1);
	} else if (cpu_has_mipsmt) {
		/*
		 * Have a CPU with access to the offlined CPUs registers wait
		 * for its TC to halt.
		 */
		err = smp_call_function_single(cpu_death_sibling,
					       wait_for_sibling_halt,
					       (void *)(unsigned long)cpu, 1);
		if (err)
			panic("Failed to call remote sibling CPU\n");
	} else if (cpu_has_vp) {
		do {
			mips_cm_lock_other(core, vpe_id);
			stat = read_cpc_co_vp_running();
			mips_cm_unlock_other();
		} while (stat & (1 << vpe_id));
	}
}

#endif /* CONFIG_HOTPLUG_CPU */

static struct plat_smp_ops cps_smp_ops = {
	.smp_setup		= cps_smp_setup,
	.prepare_cpus		= cps_prepare_cpus,
	.boot_secondary		= cps_boot_secondary,
	.init_secondary		= cps_init_secondary,
	.smp_finish		= cps_smp_finish,
	.send_ipi_single	= mips_smp_send_ipi_single,
	.send_ipi_mask		= mips_smp_send_ipi_mask,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= cps_cpu_disable,
	.cpu_die		= cps_cpu_die,
#endif
};

bool mips_cps_smp_in_use(void)
{
	extern struct plat_smp_ops *mp_ops;
	return mp_ops == &cps_smp_ops;
}

int register_cps_smp_ops(void)
{
	if (!mips_cm_present()) {
		pr_warn("MIPS CPS SMP unable to proceed without a CM\n");
		return -ENODEV;
	}

	/* check we have a GIC - we need one for IPIs */
	if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX_MSK)) {
		pr_warn("MIPS CPS SMP unable to proceed without a GIC\n");
		return -ENODEV;
	}

	register_smp_ops(&cps_smp_ops);
	return 0;
}