/*
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Cloned from linux/arch/arm/mach-vexpress/platsmp.c
 *
 *  Copyright (C) 2002 ARM Ltd.
 *  All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/smp_plat.h>
#include <asm/smp_scu.h>
#include <asm/firmware.h>

#include <mach/map.h>

#include "common.h"
#include "regs-pmu.h"

extern void exynos4_secondary_startup(void);

37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60
/*
 * Set or clear the USE_DELAYED_RESET_ASSERTION option, set on Exynos4 SoCs
 * during hot-(un)plugging CPUx.
 *
 * The feature can be cleared safely during first boot of secondary CPU.
 *
 * Exynos4 SoCs require setting USE_DELAYED_RESET_ASSERTION during powering
 * down a CPU so the CPU idle clock down feature could properly detect global
 * idle state when CPUx is off.
 */
static void exynos_set_delayed_reset_assertion(u32 core_id, bool enable)
{
	if (soc_is_exynos4()) {
		unsigned int tmp;

		tmp = pmu_raw_readl(EXYNOS_ARM_CORE_OPTION(core_id));
		if (enable)
			tmp |= S5P_USE_DELAYED_RESET_ASSERTION;
		else
			tmp &= ~(S5P_USE_DELAYED_RESET_ASSERTION);
		pmu_raw_writel(tmp, EXYNOS_ARM_CORE_OPTION(core_id));
	}
}

61
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Undo the low-power CPU state on the way back up: re-enable the D-cache
 * and rejoin the coherency domain, then clear the delayed reset assertion
 * option that was set while powering down.
 */
static inline void cpu_leave_lowpower(u32 core_id)
{
	unsigned int v;

	asm volatile(
	/* SCTLR (c1,c0,0): set CR_C to re-enable the data cache */
	"mrc	p15, 0, %0, c1, c0, 0\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	/*
	 * ACTLR (c1,c0,1): set bit 6 (0x40) -- presumably the Cortex-A9
	 * SMP bit rejoining coherency; confirm against the core's TRM.
	 */
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	orr	%0, %0, %2\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	  : "=&r" (v)
	  : "Ir" (CR_C), "Ir" (0x40)
	  : "cc");

	 exynos_set_delayed_reset_assertion(core_id, false);
}

static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
{
	u32 mpidr = cpu_logical_map(cpu);
	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	for (;;) {

		/* Turn the CPU off on next WFI instruction. */
		exynos_cpu_power_down(core_id);

90 91 92 93 94 95 96 97
		/*
		 * Exynos4 SoCs require setting
		 * USE_DELAYED_RESET_ASSERTION so the CPU idle
		 * clock down feature could properly detect
		 * global idle state when CPUx is off.
		 */
		exynos_set_delayed_reset_assertion(core_id, true);

98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118
		wfi();

		if (pen_release == core_id) {
			/*
			 * OK, proper wakeup, we're done
			 */
			break;
		}

		/*
		 * Getting here, means that we have come out of WFI without
		 * having been woken up - this shouldn't happen
		 *
		 * Just note it happening - when we're woken, we can report
		 * its occurrence.
		 */
		(*spurious)++;
	}
}
#endif /* CONFIG_HOTPLUG_CPU */

119 120 121 122 123 124 125 126 127 128
/**
 * exynos_core_power_down : power down the specified cpu
 * @cpu : the cpu to power down
 *
 * Power down the specified cpu. The sequence must be finished by a
 * call to cpu_do_idle()
 *
 */
void exynos_cpu_power_down(int cpu)
{
129 130
	u32 core_conf;

131 132 133 134 135 136 137 138 139 140 141 142
	if (cpu == 0 && (of_machine_is_compatible("samsung,exynos5420") ||
		of_machine_is_compatible("samsung,exynos5800"))) {
		/*
		 * Bypass power down for CPU0 during suspend. Check for
		 * the SYS_PWR_REG value to decide if we are suspending
		 * the system.
		 */
		int val = pmu_raw_readl(EXYNOS5_ARM_CORE0_SYS_PWR_REG);

		if (!(val & S5P_CORE_LOCAL_PWR_EN))
			return;
	}
143 144 145 146

	core_conf = pmu_raw_readl(EXYNOS_ARM_CORE_CONFIGURATION(cpu));
	core_conf &= ~S5P_CORE_LOCAL_PWR_EN;
	pmu_raw_writel(core_conf, EXYNOS_ARM_CORE_CONFIGURATION(cpu));
147 148 149 150 151 152 153 154 155 156
}

/**
 * exynos_cpu_power_up : power up the specified cpu
 * @cpu : the cpu to power up
 *
 * Power up the specified cpu
 */
void exynos_cpu_power_up(int cpu)
{
157 158 159 160 161 162
	u32 core_conf = S5P_CORE_LOCAL_PWR_EN;

	if (soc_is_exynos3250())
		core_conf |= S5P_CORE_AUTOWAKEUP_EN;

	pmu_raw_writel(core_conf,
163
			EXYNOS_ARM_CORE_CONFIGURATION(cpu));
164 165 166 167 168 169 170 171 172
}

/**
 * exynos_cpu_power_state : returns the power state of the cpu
 * @cpu : the cpu to retrieve the power state from
 *
 */
int exynos_cpu_power_state(int cpu)
{
173
	return (pmu_raw_readl(EXYNOS_ARM_CORE_STATUS(cpu)) &
174 175 176 177 178 179 180 181 182
			S5P_CORE_LOCAL_PWR_EN);
}

/**
 * exynos_cluster_power_down : power down the specified cluster
 * @cluster : the cluster to power down
 */
void exynos_cluster_power_down(int cluster)
{
	/* Writing zero clears S5P_CORE_LOCAL_PWR_EN for the cluster. */
	pmu_raw_writel(0, EXYNOS_COMMON_CONFIGURATION(cluster));
}

/**
 * exynos_cluster_power_up : power up the specified cluster
 * @cluster : the cluster to power up
 */
void exynos_cluster_power_up(int cluster)
{
192 193
	pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN,
			EXYNOS_COMMON_CONFIGURATION(cluster));
194 195 196 197 198 199 200 201 202
}

/**
 * exynos_cluster_power_state : returns the power state of the cluster
 * @cluster : the cluster to retrieve the power state from
 *
 */
int exynos_cluster_power_state(int cluster)
{
203 204
	return (pmu_raw_readl(EXYNOS_COMMON_STATUS(cluster)) &
		S5P_CORE_LOCAL_PWR_EN);
205 206
}

207
void __iomem *cpu_boot_reg_base(void)
208 209
{
	if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_1_1)
210
		return pmu_base_addr + S5P_INFORM5;
211
	return sysram_base_addr;
212 213 214 215 216 217 218
}

/*
 * Return the per-CPU boot address register, or an ERR_PTR when the
 * base mapping is unavailable.
 */
static inline void __iomem *cpu_boot_reg(int cpu)
{
	void __iomem *reg = cpu_boot_reg_base();

	if (!reg)
		return ERR_PTR(-ENODEV);

	/* Exynos4412 has one register per CPU, 4 bytes apart. */
	if (soc_is_exynos4412())
		return reg + 4 * cpu;

	/* Exynos5420/5800 use a single register at offset 4. */
	if (soc_is_exynos5420() || soc_is_exynos5800())
		return reg + 4;

	return reg;
}

228 229 230 231 232 233 234 235 236 237 238 239
/*
 * Set wake up by local power mode and execute software reset for given core.
 *
 * Currently this is needed only when booting secondary CPU on Exynos3250.
 */
static void exynos_core_restart(u32 core_id)
{
	u32 val;

	if (!of_machine_is_compatible("samsung,exynos3250"))
		return;

240 241 242 243
	while (!pmu_raw_readl(S5P_PMU_SPARE2))
		udelay(10);
	udelay(10);

244 245 246 247 248 249 250 251
	val = pmu_raw_readl(EXYNOS_ARM_CORE_STATUS(core_id));
	val |= S5P_CORE_WAKEUP_FROM_LOCAL_CFG;
	pmu_raw_writel(val, EXYNOS_ARM_CORE_STATUS(core_id));

	pr_info("CPU%u: Software reset\n", core_id);
	pmu_raw_writel(EXYNOS_CORE_PO_RESET(core_id), EXYNOS_SWRESET);
}

/*
 * Write pen_release in a way that is guaranteed to be visible to all
 * observers, irrespective of whether they're taking part in coherency
 * or not.  This is necessary for the hotplug code to work reliably.
 */
static void write_pen_release(int val)
{
	pen_release = val;
	/* Order the store before publishing it to other CPUs. */
	smp_wmb();
	/* Push the value past the cache so non-coherent observers see it. */
	sync_cache_w(&pen_release);
}

264 265 266 267 268 269 270
static void __iomem *scu_base_addr(void)
{
	return (void __iomem *)(S5P_VA_SCU);
}

static DEFINE_SPINLOCK(boot_lock);

271
static void exynos_secondary_init(unsigned int cpu)
272 273 274 275 276
{
	/*
	 * let the primary processor know we're out of the
	 * pen, then head off into the C entry point
	 */
277
	write_pen_release(-1);
278 279 280 281 282 283 284 285

	/*
	 * Synchronise with the boot thread.
	 */
	spin_lock(&boot_lock);
	spin_unlock(&boot_lock);
}

286
static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
287 288
{
	unsigned long timeout;
289 290
	u32 mpidr = cpu_logical_map(cpu);
	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
291
	int ret = -ENOSYS;
292 293 294 295 296 297 298 299 300 301 302 303

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting pen_release.
	 *
304
	 * Note that "pen_release" is the hardware CPU core ID, whereas
305 306
	 * "cpu" is Linux's internal ID.
	 */
307
	write_pen_release(core_id);
308

309 310
	if (!exynos_cpu_power_state(core_id)) {
		exynos_cpu_power_up(core_id);
311 312 313
		timeout = 10;

		/* wait max 10 ms until cpu1 is on */
314 315
		while (exynos_cpu_power_state(core_id)
		       != S5P_CORE_LOCAL_PWR_EN) {
316 317 318 319 320 321 322 323 324 325 326 327
			if (timeout-- == 0)
				break;

			mdelay(1);
		}

		if (timeout == 0) {
			printk(KERN_ERR "cpu1 power enable failed");
			spin_unlock(&boot_lock);
			return -ETIMEDOUT;
		}
	}
328 329 330

	exynos_core_restart(core_id);

331 332 333 334 335 336 337 338
	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */

	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
339 340
		unsigned long boot_addr;

341
		smp_rmb();
342

343 344 345 346 347 348
		boot_addr = virt_to_phys(exynos4_secondary_startup);

		/*
		 * Try to set boot address using firmware first
		 * and fall back to boot register if it fails.
		 */
349
		ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
350 351 352
		if (ret && ret != -ENOSYS)
			goto fail;
		if (ret == -ENOSYS) {
353
			void __iomem *boot_reg = cpu_boot_reg(core_id);
354 355 356 357 358

			if (IS_ERR(boot_reg)) {
				ret = PTR_ERR(boot_reg);
				goto fail;
			}
359
			__raw_writel(boot_addr, boot_reg);
360
		}
361

362
		call_firmware_op(cpu_boot, core_id);
363

364 365 366 367
		if (soc_is_exynos3250())
			dsb_sev();
		else
			arch_send_wakeup_ipi_mask(cpumask_of(cpu));
368

369 370 371 372 373 374
		if (pen_release == -1)
			break;

		udelay(10);
	}

375 376 377
	/* No harm if this is called during first boot of secondary CPU */
	exynos_set_delayed_reset_assertion(core_id, false);

378 379 380 381
	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
382
fail:
383 384
	spin_unlock(&boot_lock);

385
	return pen_release != -1 ? ret : 0;
386 387 388 389 390 391 392
}

/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */

393
static void __init exynos_smp_init_cpus(void)
394 395 396 397
{
	void __iomem *scu_base = scu_base_addr();
	unsigned int i, ncores;

398
	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
399
		ncores = scu_base ? scu_get_core_count(scu_base) : 1;
400 401 402 403 404 405
	else
		/*
		 * CPU Nodes are passed thru DT and set_cpu_possible
		 * is set by "arm_dt_init_cpu_maps".
		 */
		return;
406 407

	/* sanity check */
408 409 410 411
	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
412 413 414 415 416 417
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);
}

418
static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
419
{
420 421
	int i;

422 423
	exynos_sysram_init();

424
	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
425
		scu_enable(scu_base_addr());
426

427
	/*
428 429 430 431
	 * Write the address of secondary startup into the
	 * system-wide flags register. The boot monitor waits
	 * until it receives a soft interrupt, and then the
	 * secondary CPU branches to this address.
432 433 434
	 *
	 * Try using firmware operation first and fall back to
	 * boot register if it fails.
435
	 */
436 437
	for (i = 1; i < max_cpus; ++i) {
		unsigned long boot_addr;
438 439
		u32 mpidr;
		u32 core_id;
440
		int ret;
441

442 443
		mpidr = cpu_logical_map(i);
		core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
444 445
		boot_addr = virt_to_phys(exynos4_secondary_startup);

446
		ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
447 448 449
		if (ret && ret != -ENOSYS)
			break;
		if (ret == -ENOSYS) {
450
			void __iomem *boot_reg = cpu_boot_reg(core_id);
451 452 453

			if (IS_ERR(boot_reg))
				break;
454
			__raw_writel(boot_addr, boot_reg);
455
		}
456
	}
457
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * platform-specific code to shutdown a CPU
 *
 * Called with IRQs disabled
 */
static void exynos_cpu_die(unsigned int cpu)
{
	int spurious = 0;
	u32 core_id = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 0);

	/* Flush caches and leave the coherency domain. */
	v7_exit_coherency_flush(louis);

	platform_do_lowpower(cpu, &spurious);

	/*
	 * Bring this CPU back into the world of cache coherency,
	 * and then restore interrupts.
	 */
	cpu_leave_lowpower(core_id);

	if (spurious)
		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
}
#endif /* CONFIG_HOTPLUG_CPU */

/* Exynos platform hooks registered with the ARM SMP core. */
struct smp_operations exynos_smp_ops __initdata = {
	.smp_init_cpus		= exynos_smp_init_cpus,
	.smp_prepare_cpus	= exynos_smp_prepare_cpus,
	.smp_secondary_init	= exynos_secondary_init,
	.smp_boot_secondary	= exynos_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= exynos_cpu_die,
#endif
};