/*
 * Record and handle CPU attributes.
 *
 * Copyright (C) 2014 ARM Ltd.
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <asm/arch_timer.h>
#include <asm/cachetype.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/smp.h>

/*
 * In case the boot CPU is hotpluggable, we record its initial state and
 * current state separately. Certain system registers may contain different
 * values depending on configuration at or after reset.
 */
DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
static struct cpuinfo_arm64 boot_cpu_data;
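/*
 * Assume mixed-endian EL0 is supported until a CPU which lacks it is
 * brought up; update_mixed_endian_el0_support() ANDs in each CPU's
 * capability, so a single CPU without the feature disables it
 * system-wide.
 */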
static bool mixed_endian_el0 = true;

static const char *icache_policy_str[] = {
	[ICACHE_POLICY_RESERVED] = "RESERVED/UNKNOWN",
	[ICACHE_POLICY_AIVIVT] = "AIVIVT",
	[ICACHE_POLICY_VIPT] = "VIPT",
	[ICACHE_POLICY_PIPT] = "PIPT",
};

unsigned long __icache_flags;

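/*
 * Determine the L1 I-cache indexing policy from CTR_EL0 and record in
 * __icache_flags whether the cache can alias.
 */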
static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
{
	unsigned int cpu = smp_processor_id();
	u32 l1ip = CTR_L1IP(info->reg_ctr);

	if (l1ip != ICACHE_POLICY_PIPT) {
		/*
		 * VIPT caches are non-aliasing if the VA always equals the PA
		 * in all bit positions that are covered by the index. This is
		 * the case if the size of a way (# of sets * line size) does
		 * not exceed PAGE_SIZE.
		 */
		u32 waysize = icache_get_numsets() * icache_get_linesize();

		if (l1ip != ICACHE_POLICY_VIPT || waysize > PAGE_SIZE)
			set_bit(ICACHEF_ALIASING, &__icache_flags);
	}
	if (l1ip == ICACHE_POLICY_AIVIVT)
		set_bit(ICACHEF_AIVIVT, &__icache_flags);

	pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
}

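/* Mixed-endian EL0 support on the calling CPU, per ID_AA64MMFR0_EL1. */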
bool cpu_supports_mixed_endian_el0(void)
{
	return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
}

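/* True only if every CPU brought up so far supports mixed-endian EL0. */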
bool system_supports_mixed_endian_el0(void)
{
	return mixed_endian_el0;
}

static void update_mixed_endian_el0_support(struct cpuinfo_arm64 *info)
{
	mixed_endian_el0 &= id_aa64mmfr0_mixed_endian_el0(info->reg_id_aa64mmfr0);
}

static void update_cpu_features(struct cpuinfo_arm64 *info)
{
	update_mixed_endian_el0_support(info);
}

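/*
 * Compare the boot CPU's value of a system register against the current
 * CPU's, considering only the bits covered by @mask. Returns 1 (and
 * warns) on a mismatch, 0 otherwise.
 */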
static int check_reg_mask(const char *name, u64 mask, u64 boot, u64 cur, int cpu)
{
	if ((boot & mask) == (cur & mask))
		return 0;

	pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016lx, CPU%d: %#016lx\n",
		name, (unsigned long)boot, cpu, (unsigned long)cur);

	return 1;
}

#define CHECK_MASK(field, mask, boot, cur, cpu) \
	check_reg_mask(#field, mask, (boot)->reg_ ## field, (cur)->reg_ ## field, cpu)

#define CHECK(field, boot, cur, cpu) \
	CHECK_MASK(field, ~0ULL, boot, cur, cpu)

/*
 * Verify that CPUs don't have unexpected differences that will cause problems.
 */
static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
{
	unsigned int cpu = smp_processor_id();
	struct cpuinfo_arm64 *boot = &boot_cpu_data;
	unsigned int diff = 0;

	/*
	 * The kernel can handle differing I-cache policies, but otherwise
	 * caches should look identical. Userspace JITs will make use of
	 * *minLine.
	 */
	diff |= CHECK_MASK(ctr, 0xffff3fff, boot, cur, cpu);

	/*
	 * Userspace may perform DC ZVA instructions. Mismatched block sizes
	 * could result in too much or too little memory being zeroed if a
	 * process is preempted and migrated between CPUs.
	 */
	diff |= CHECK(dczid, boot, cur, cpu);

	/* If different, timekeeping will be broken (especially with KVM) */
	diff |= CHECK(cntfrq, boot, cur, cpu);

	/*
	 * The kernel uses self-hosted debug features and expects CPUs to
	 * support identical debug features. We presently need CTX_CMPs, WRPs,
	 * and BRPs to be identical.
	 * ID_AA64DFR1 is currently RES0.
	 */
	diff |= CHECK(id_aa64dfr0, boot, cur, cpu);
	diff |= CHECK(id_aa64dfr1, boot, cur, cpu);

	/*
	 * Even in big.LITTLE, processors should be identical instruction-set
	 * wise.
	 */
	diff |= CHECK(id_aa64isar0, boot, cur, cpu);
	diff |= CHECK(id_aa64isar1, boot, cur, cpu);

	/*
	 * Differing PARange support is fine as long as all peripherals and
	 * memory are mapped within the minimum PARange of all CPUs.
	 * Linux should not care about secure memory.
	 * ID_AA64MMFR1 is currently RES0.
	 */
	diff |= CHECK_MASK(id_aa64mmfr0, 0xffffffffffff0ff0, boot, cur, cpu);
	diff |= CHECK(id_aa64mmfr1, boot, cur, cpu);

	/*
	 * EL3 is not our concern.
	 * ID_AA64PFR1 is currently RES0.
	 */
	diff |= CHECK_MASK(id_aa64pfr0, 0xffffffffffff0fff, boot, cur, cpu);
	diff |= CHECK(id_aa64pfr1, boot, cur, cpu);

	/*
	 * If we have AArch32, we care about 32-bit features for compat. These
	 * registers should be RES0 otherwise.
	 */
	diff |= CHECK(id_dfr0, boot, cur, cpu);
	diff |= CHECK(id_isar0, boot, cur, cpu);
	diff |= CHECK(id_isar1, boot, cur, cpu);
	diff |= CHECK(id_isar2, boot, cur, cpu);
	diff |= CHECK(id_isar3, boot, cur, cpu);
	diff |= CHECK(id_isar4, boot, cur, cpu);
	diff |= CHECK(id_isar5, boot, cur, cpu);
	/*
	 * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
	 * ACTLR formats could differ across CPUs and therefore would have to
	 * be trapped for virtualization anyway.
	 */
	diff |= CHECK_MASK(id_mmfr0, 0xff0fffff, boot, cur, cpu);
	diff |= CHECK(id_mmfr1, boot, cur, cpu);
	diff |= CHECK(id_mmfr2, boot, cur, cpu);
	diff |= CHECK(id_mmfr3, boot, cur, cpu);
	diff |= CHECK(id_pfr0, boot, cur, cpu);
	diff |= CHECK(id_pfr1, boot, cur, cpu);

	diff |= CHECK(mvfr0, boot, cur, cpu);
	diff |= CHECK(mvfr1, boot, cur, cpu);
	diff |= CHECK(mvfr2, boot, cur, cpu);

	/*
	 * Mismatched CPU features are a recipe for disaster. Don't even
	 * pretend to support them.
	 */
	WARN_TAINT_ONCE(diff, TAINT_CPU_OUT_OF_SPEC,
			"Unsupported CPU feature variation.\n");
}

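/*
 * Read this CPU's identification registers into @info, then run the
 * per-CPU icache, errata, and feature detection based on them.
 */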
static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
{
	info->reg_cntfrq = arch_timer_get_cntfrq();
	info->reg_ctr = read_cpuid_cachetype();
	info->reg_dczid = read_cpuid(DCZID_EL0);
	info->reg_midr = read_cpuid_id();

	info->reg_id_aa64dfr0 = read_cpuid(ID_AA64DFR0_EL1);
	info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1);
	info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1);
	info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1);
	info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
	info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
	info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);

	info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
	info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
	info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
	info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
	info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
	info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
	info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
	info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
	info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
	info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
	info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
	info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
	info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);

	info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
	info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
	info->reg_mvfr2 = read_cpuid(MVFR2_EL1);

	cpuinfo_detect_icache_policy(info);

	check_local_cpu_errata();
	update_cpu_features(info);
}

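/*
 * Record a secondary (or hotplugged) CPU's state and check it for
 * unexpected variation from the boot CPU.
 */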
void cpuinfo_store_cpu(void)
{
	struct cpuinfo_arm64 *info = this_cpu_ptr(&cpu_data);
	__cpuinfo_store_cpu(info);
	cpuinfo_sanity_check(info);
}

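/*
 * Record the boot CPU's state at bring-up; a copy is kept in
 * boot_cpu_data so that later CPUs can be checked against the boot
 * CPU's initial values.
 */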
void __init cpuinfo_store_boot_cpu(void)
{
	struct cpuinfo_arm64 *info = &per_cpu(cpu_data, 0);
	__cpuinfo_store_cpu(info);

	boot_cpu_data = *info;
}