/*
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * arch/arm/mach-exynos/mcpm-exynos.c
 *
 * Based on arch/arm/mach-vexpress/dcscb.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/arm-cci.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>

#include <asm/cputype.h>
#include <asm/cp15.h>
#include <asm/mcpm.h>

#include "regs-pmu.h"
#include "common.h"

#define EXYNOS5420_CPUS_PER_CLUSTER	4
#define EXYNOS5420_NR_CLUSTERS		2

#define EXYNOS5420_ENABLE_AUTOMATIC_CORE_DOWN	BIT(9)
#define EXYNOS5420_USE_ARM_CORE_DOWN_STATE	BIT(29)
#define EXYNOS5420_USE_L2_COMMON_UP_STATE	BIT(30)

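/*
 * Base of the non-secure SYSRAM region: the boot trampoline that redirects
 * secondary CPUs (and the resume path) to mcpm_entry_point() is written here.
 */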
static void __iomem *ns_sram_base_addr;

/*
 * The common v7_exit_coherency_flush API could not be used because of the
 * Erratum 799270 workaround. This macro is the same as the common one (in
 * arch/arm/include/asm/cacheflush.h) except for the erratum handling.
 */
#define exynos_v7_exit_coherency_flush(level) \
	asm volatile( \
	"stmfd	sp!, {fp, ip}\n\t"\
	"mrc	p15, 0, r0, c1, c0, 0	@ get SCTLR\n\t" \
	"bic	r0, r0, #"__stringify(CR_C)"\n\t" \
	"mcr	p15, 0, r0, c1, c0, 0	@ set SCTLR\n\t" \
	"isb\n\t"\
	"bl	v7_flush_dcache_"__stringify(level)"\n\t" \
	"mrc	p15, 0, r0, c1, c0, 1	@ get ACTLR\n\t" \
	"bic	r0, r0, #(1 << 6)	@ disable local coherency\n\t" \
	/* Dummy Load of a device register to avoid Erratum 799270 */ \
	"ldr	r4, [%0]\n\t" \
	"and	r4, r4, #0\n\t" \
	"orr	r0, r0, r4\n\t" \
	"mcr	p15, 0, r0, c1, c0, 1	@ set ACTLR\n\t" \
	"isb\n\t" \
	"dsb\n\t" \
	"ldmfd	sp!, {fp, ip}" \
	: \
	: "Ir" (pmu_base_addr + S5P_INFORM0) \
	: "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
	  "r9", "r10", "lr", "memory")

/*
 * We can't use regular spinlocks. In the switcher case, it is possible
 * for an outbound CPU to call power_down() after its inbound counterpart
 * is already live using the same logical CPU number which trips lockdep
 * debugging.
 */
static arch_spinlock_t exynos_mcpm_lock = __ARCH_SPIN_LOCK_UNLOCKED;
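/*
 * Per-CPU count of outstanding power_up() requests (0, 1 or 2, see
 * exynos_power_up()); summing the counts of a cluster tells whether the
 * cluster itself is still in use.
 */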
static int
cpu_use_count[EXYNOS5420_CPUS_PER_CLUSTER][EXYNOS5420_NR_CLUSTERS];

#define exynos_cluster_usecnt(cluster) \
	(cpu_use_count[0][cluster] +   \
	 cpu_use_count[1][cluster] +   \
	 cpu_use_count[2][cluster] +   \
	 cpu_use_count[3][cluster])

#define exynos_cluster_unused(cluster) !exynos_cluster_usecnt(cluster)

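/*
 * MCPM power_up method: power on the requested CPU and, if it is the first
 * CPU coming up in its cluster, power on the cluster (L2/COMMON block) first.
 */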
static int exynos_power_up(unsigned int cpu, unsigned int cluster)
{
	unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
		cluster >= EXYNOS5420_NR_CLUSTERS)
		return -EINVAL;

	/*
	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
	 * variant exists, we need to disable IRQs manually here.
	 */
	local_irq_disable();
	arch_spin_lock(&exynos_mcpm_lock);

	cpu_use_count[cpu][cluster]++;
	if (cpu_use_count[cpu][cluster] == 1) {
		bool was_cluster_down =
			(exynos_cluster_usecnt(cluster) == 1);

		/*
		 * Turn on the cluster (L2/COMMON) and then power on the
		 * cores.
		 */
		if (was_cluster_down)
			exynos_cluster_power_up(cluster);

		exynos_cpu_power_up(cpunr);
	} else if (cpu_use_count[cpu][cluster] != 2) {
		/*
		 * The only possible values are:
		 * 0 = CPU down
		 * 1 = CPU (still) up
		 * 2 = CPU requested to be up before it had a chance
		 *     to actually make itself down.
		 * Any other value is a bug.
		 */
		BUG();
	}

	arch_spin_unlock(&exynos_mcpm_lock);
	local_irq_enable();

	return 0;
}

/*
 * NOTE: This function requires the stack data to be visible through power down
 * and can only be executed on processors like A15 and A7 that hit the cache
 * with the C bit clear in the SCTLR register.
 */
static void exynos_power_down(void)
{
	unsigned int mpidr, cpu, cluster;
	bool last_man = false, skip_wfi = false;
	unsigned int cpunr;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
			cluster >= EXYNOS5420_NR_CLUSTERS);

	__mcpm_cpu_going_down(cpu, cluster);

	arch_spin_lock(&exynos_mcpm_lock);
	BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
	cpu_use_count[cpu][cluster]--;
	if (cpu_use_count[cpu][cluster] == 0) {
		exynos_cpu_power_down(cpunr);

		if (exynos_cluster_unused(cluster)) {
			exynos_cluster_power_down(cluster);
			last_man = true;
		}
	} else if (cpu_use_count[cpu][cluster] == 1) {
		/*
		 * A power_up request went ahead of us.
		 * Even if we do not want to shut this CPU down,
		 * the caller expects a certain state as if the WFI
		 * was aborted.  So let's continue with cache cleaning.
		 */
		skip_wfi = true;
	} else {
		BUG();
	}

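	/*
	 * The last CPU of a cluster enters the outbound critical section so
	 * that the whole cluster can be shut down: all cache levels are
	 * flushed and the cluster's CCI port is disabled.  Any other CPU
	 * only flushes its local cache levels.
	 */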
	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
		arch_spin_unlock(&exynos_mcpm_lock);

		if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
			/*
			 * On the Cortex-A15 we need to disable
			 * L2 prefetching before flushing the cache.
			 */
			asm volatile(
			"mcr	p15, 1, %0, c15, c0, 3\n\t"
			"isb\n\t"
			"dsb"
			: : "r" (0x400));
		}

		/* Flush all cache levels for this cluster. */
		exynos_v7_exit_coherency_flush(all);

		/*
		 * Disable cluster-level coherency by masking
		 * incoming snoops and DVM messages:
		 */
		cci_disable_port_by_cpu(mpidr);

		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
	} else {
		arch_spin_unlock(&exynos_mcpm_lock);

		/* Disable and flush the local CPU cache. */
		exynos_v7_exit_coherency_flush(louis);
	}

	__mcpm_cpu_down(cpu, cluster);

	/* Now we are prepared for power-down, do it: */
	if (!skip_wfi)
		wfi();

	/* Not dead at this point?  Let our caller cope. */
}

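/*
 * Poll (for roughly 100 ms) until the CPU's use count has dropped to zero
 * and the PMU reports its power state as OFF.
 */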
static int exynos_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
{
	unsigned int tries = 100;
	unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
			cluster >= EXYNOS5420_NR_CLUSTERS);

	/* Wait for the core state to be OFF */
	while (tries--) {
		if (ACCESS_ONCE(cpu_use_count[cpu][cluster]) == 0) {
			if (exynos_cpu_power_state(cpunr) == 0)
				return 0; /* success: the CPU is halted */
		}

		/* Otherwise, wait and retry: */
		msleep(1);
	}

	return -ETIMEDOUT; /* timeout */
}

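/*
 * MCPM powered_up method: runs on a CPU that has just come back up through
 * the MCPM entry point.  If its use count was dropped on the way down (e.g.
 * an idle power-down that ended with a wake-up interrupt), restore it to 1.
 */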
static void exynos_powered_up(void)
{
	unsigned int mpidr, cpu, cluster;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	arch_spin_lock(&exynos_mcpm_lock);
	if (cpu_use_count[cpu][cluster] == 0)
		cpu_use_count[cpu][cluster] = 1;
	arch_spin_unlock(&exynos_mcpm_lock);
}

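/*
 * MCPM suspend method: the expected residency is not used here; power the
 * CPU down and, should the power down be aborted, undo the PMU changes.
 */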
static void exynos_suspend(u64 residency)
{
	unsigned int mpidr, cpunr;

	exynos_power_down();

	/*
	 * Execution reaches here only if the CPU did not power down.
	 * Hence roll back the changes done in the exynos_power_down() function.
	 *
	 * CAUTION: "This function requires the stack data to be visible through
	 * power down and can only be executed on processors like A15 and A7
	 * that hit the cache with the C bit clear in the SCTLR register."
	 */
	mpidr = read_cpuid_mpidr();
	cpunr = exynos_pmu_cpunr(mpidr);

	exynos_cpu_power_up(cpunr);
}

static const struct mcpm_platform_ops exynos_power_ops = {
	.power_up		= exynos_power_up,
	.power_down		= exynos_power_down,
	.wait_for_powerdown	= exynos_wait_for_powerdown,
	.suspend		= exynos_suspend,
	.powered_up		= exynos_powered_up,
};

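/*
 * Only the boot CPU is running when this is called, so mark it as the sole
 * CPU in use.
 */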
static void __init exynos_mcpm_usage_count_init(void)
{
	unsigned int mpidr, cpu, cluster;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER  ||
			cluster >= EXYNOS5420_NR_CLUSTERS);

	cpu_use_count[cpu][cluster] = 1;
}

/*
 * Enable cluster-level coherency, in preparation for turning on the MMU.
 */
static void __naked exynos_pm_power_up_setup(unsigned int affinity_level)
{
	asm volatile ("\n"
	"cmp	r0, #1\n"
	"bxne	lr\n"
	"b	cci_enable_port_for_self");
}

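/*
 * Cache-disable handler passed to mcpm_loopback(): flush and disable this
 * CPU's caches (disabling L2 prefetching first on the Cortex-A15) before
 * coherency is re-established through the MCPM entry point.
 */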
static void __init exynos_cache_off(void)
{
	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
		/* disable L2 prefetching on the Cortex-A15 */
		asm volatile(
		"mcr	p15, 1, %0, c15, c0, 3\n\t"
		"isb\n\t"
		"dsb"
		: : "r" (0x400));
	}
	exynos_v7_exit_coherency_flush(all);
}

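/* SoCs on which this MCPM back-end is usable. */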
static const struct of_device_id exynos_dt_mcpm_match[] = {
	{ .compatible = "samsung,exynos5420" },
	{ .compatible = "samsung,exynos5800" },
	{},
};

static void exynos_mcpm_setup_entry_point(void)
{
	/*
	 * U-Boot SPL is hardcoded to jump to the start of ns_sram_base_addr
	 * as part of secondary_cpu_start().  Let's redirect it to the
	 * mcpm_entry_point(). This is done during both secondary boot-up as
	 * well as system resume.
	 */
	__raw_writel(0xe59f0000, ns_sram_base_addr);     /* ldr r0, [pc, #0] */
	__raw_writel(0xe12fff10, ns_sram_base_addr + 4); /* bx  r0 */
	__raw_writel(virt_to_phys(mcpm_entry_point), ns_sram_base_addr + 8);
}

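/* Re-install the MCPM entry point in SYSRAM when resuming from suspend. */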
static struct syscore_ops exynos_mcpm_syscore_ops = {
	.resume	= exynos_mcpm_setup_entry_point,
};

static int __init exynos_mcpm_init(void)
{
	struct device_node *node;
	unsigned int value, i;
	int ret;

	node = of_find_matching_node(NULL, exynos_dt_mcpm_match);
	if (!node)
		return -ENODEV;
	of_node_put(node);

	if (!cci_probed())
		return -ENODEV;

	node = of_find_compatible_node(NULL, NULL,
			"samsung,exynos4210-sysram-ns");
	if (!node)
		return -ENODEV;

	ns_sram_base_addr = of_iomap(node, 0);
	of_node_put(node);
	if (!ns_sram_base_addr) {
		pr_err("failed to map non-secure iRAM base address\n");
		return -ENOMEM;
	}

	/*
	 * To increase the stability of KFC reset we need to program
	 * the PMU SPARE3 register
	 */
	pmu_raw_writel(EXYNOS5420_SWRESET_KFC_SEL, S5P_PMU_SPARE3);

	exynos_mcpm_usage_count_init();

	ret = mcpm_platform_register(&exynos_power_ops);
	if (!ret)
		ret = mcpm_sync_init(exynos_pm_power_up_setup);
	if (!ret)
		ret = mcpm_loopback(exynos_cache_off); /* turn on the CCI */
	if (ret) {
		iounmap(ns_sram_base_addr);
		return ret;
	}

	mcpm_smp_set_ops();

	pr_info("Exynos MCPM support installed\n");

	/*
	 * On Exynos5420/5800 for the A15 and A7 clusters:
	 *
	 * EXYNOS5420_ENABLE_AUTOMATIC_CORE_DOWN ensures that all the cores
	 * in a cluster are turned off before turning off the cluster L2.
	 *
	 * EXYNOS5420_USE_ARM_CORE_DOWN_STATE ensures that a core is powered
	 * off before waking it up.
	 *
	 * EXYNOS5420_USE_L2_COMMON_UP_STATE ensures that cluster L2 will be
	 * turned on before the first man is powered up.
	 */
	for (i = 0; i < EXYNOS5420_NR_CLUSTERS; i++) {
		value = pmu_raw_readl(EXYNOS_COMMON_OPTION(i));
		value |= EXYNOS5420_ENABLE_AUTOMATIC_CORE_DOWN |
			 EXYNOS5420_USE_ARM_CORE_DOWN_STATE    |
			 EXYNOS5420_USE_L2_COMMON_UP_STATE;
		pmu_raw_writel(value, EXYNOS_COMMON_OPTION(i));
	}

	exynos_mcpm_setup_entry_point();

	register_syscore_ops(&exynos_mcpm_syscore_ops);

	return ret;
}

early_initcall(exynos_mcpm_init);