/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/types.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>

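/*
 * Match this CPU against the MIDR model/revision window in @entry. CPUs
 * whose REVIDR_EL1 sets the bits listed in @entry->fixed_revs carry a
 * hardware fix for that revision and are reported as unaffected.
 */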
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	const struct arm64_midr_revidr *fix;
	u32 midr = read_cpuid_id(), revidr;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	if (!MIDR_IS_CPU_MODEL_RANGE(midr, entry->midr_model,
				     entry->midr_range_min,
				     entry->midr_range_max))
		return false;

	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
	revidr = read_cpuid(REVIDR_EL1);
	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
			return false;

	return true;
}

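/*
 * Kryo parts use several part numbers, so compare only the implementor,
 * architecture and the top nibble of the part number field.
 */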
static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	model = read_cpuid_id();
	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK;

	return model == entry->midr_model;
}

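/*
 * Detect CPUs whose strict CTR_EL0 fields (e.g. the cache line sizes)
 * differ from the system-wide safe value established at boot.
 */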
static bool
has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return (read_cpuid_cachetype() & arm64_ftr_reg_ctrel0.strict_mask) !=
		(arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
}

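/*
 * Clearing SCTLR_EL1.UCT traps EL0 reads of CTR_EL0, so the kernel can
 * emulate the system-wide safe value instead.
 */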
static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
{
	/* Clear SCTLR_EL1.UCT */
	config_sctlr_el1(SCTLR_EL1_UCT, 0);
}

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

#ifdef CONFIG_KVM
extern char __qcom_hyp_sanitize_link_stack_start[];
extern char __qcom_hyp_sanitize_link_stack_end[];
extern char __smccc_workaround_1_smc_start[];
extern char __smccc_workaround_1_smc_end[];
extern char __smccc_workaround_1_hvc_start[];
extern char __smccc_workaround_1_hvc_end[];

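/*
 * Copy the hardening sequence into every 0x80-byte vector entry of the
 * given 2K hyp vector slot, then make it visible to instruction fetch.
 */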
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
	int i;

	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}

static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
				      const char *hyp_vecs_start,
				      const char *hyp_vecs_end)
{
	static int last_slot = -1;
	static DEFINE_SPINLOCK(bp_lock);
	int cpu, slot = -1;

	spin_lock(&bp_lock);
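	/* Reuse a vector slot if another CPU already installed this callback. */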
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
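		/* First user of this callback: claim the next slot and patch it. */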
		last_slot++;
		BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
			/ SZ_2K) <= last_slot);
		slot = last_slot;
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	spin_unlock(&bp_lock);
}
#else
#define __qcom_hyp_sanitize_link_stack_start	NULL
#define __qcom_hyp_sanitize_link_stack_end	NULL
#define __smccc_workaround_1_smc_start		NULL
#define __smccc_workaround_1_smc_end		NULL
#define __smccc_workaround_1_hvc_start		NULL
#define __smccc_workaround_1_hvc_end		NULL

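/*
 * Without KVM there are no hyp vectors to patch; just record the
 * hardening callback for this CPU.
 */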
static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
				      const char *hyp_vecs_start,
				      const char *hyp_vecs_end)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}
#endif	/* CONFIG_KVM */

static void  install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
				     bp_hardening_cb_t fn,
				     const char *hyp_vecs_start,
				     const char *hyp_vecs_end)
{
	u64 pfr0;

	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
		return;

	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
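	/* CPUs advertising CSV2 are not affected and need no mitigation. */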
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
		return;

	__install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
}

#include <uapi/linux/psci.h>
#include <linux/arm-smccc.h>
#include <linux/psci.h>

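/* Per-conduit wrappers around the SMCCC 1.1 ARCH_WORKAROUND_1 call. */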
static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

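/*
 * Probe the firmware (SMCCC 1.1 or later is required) for
 * ARM_SMCCC_ARCH_WORKAROUND_1 and install the callback matching the
 * PSCI conduit in use.
 */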
static void
enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
{
	bp_hardening_cb_t cb;
	void *smccc_start, *smccc_end;
	struct arm_smccc_res res;

	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
		return;

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
		return;

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		if (res.a0)
			return;
		cb = call_hvc_arch_workaround_1;
		smccc_start = __smccc_workaround_1_hvc_start;
		smccc_end = __smccc_workaround_1_hvc_end;
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		if (res.a0)
			return;
		cb = call_smc_arch_workaround_1;
		smccc_start = __smccc_workaround_1_smc_start;
		smccc_end = __smccc_workaround_1_smc_end;
		break;

	default:
		return;
	}

	install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);

	return;
}

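/*
 * Stuff the return-address (link) stack with 16 benign entries so that
 * speculatively executed returns cannot be steered by stale predictions.
 */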
static void qcom_link_stack_sanitization(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}

static void
qcom_enable_link_stack_sanitization(const struct arm64_cpu_capabilities *entry)
{
	install_bp_hardening_cb(entry, qcom_link_stack_sanitization,
				__qcom_hyp_sanitize_link_stack_start,
				__qcom_hyp_sanitize_link_stack_end);
}
#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */

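/*
 * Helpers for the table below: match a variant/revision window of one
 * part, match all revisions of a part, or mark revisions that
 * REVIDR_EL1 reports as already fixed.
 */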
#define MIDR_RANGE(model, min, max) \
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \
	.matches = is_affected_midr_range, \
	.midr_model = model, \
	.midr_range_min = min, \
	.midr_range_max = max

#define MIDR_ALL_VERSIONS(model) \
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \
	.matches = is_affected_midr_range, \
	.midr_model = model, \
	.midr_range_min = 0, \
	.midr_range_max = (MIDR_VARIANT_MASK | MIDR_REVISION_MASK)

#define MIDR_FIXED(rev, revidr_mask) \
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

const struct arm64_cpu_capabilities arm64_errata[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	{
	/* Cortex-A53 r0p[012] */
		.desc = "ARM errata 826319, 827319, 824069",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
	{
	/* Cortex-A53 r0p[01] */
		.desc = "ARM errata 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x01),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		MIDR_RANGE(MIDR_CORTEX_A57,
			   MIDR_CPU_VAR_REV(0, 0),
			   MIDR_CPU_VAR_REV(1, 2)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		MIDR_RANGE(MIDR_CORTEX_A57,
			   MIDR_CPU_VAR_REV(0, 0),
			   MIDR_CPU_VAR_REV(1, 2)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
	{
	/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 843419",
		.capability = ARM64_WORKAROUND_843419,
		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
		MIDR_FIXED(0x4, BIT(8)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
	/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
	/* Cavium ThunderX, pass 1.x */
		.desc = "Cavium erratum 23154",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		MIDR_RANGE(MIDR_THUNDERX, 0x00, 0x01),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		MIDR_RANGE(MIDR_THUNDERX,
			   MIDR_CPU_VAR_REV(0, 0),
			   MIDR_CPU_VAR_REV(1, 1)),
	},
	{
	/* Cavium ThunderX, T81 pass 1.0 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x00),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		MIDR_RANGE(MIDR_THUNDERX, 0x00,
			   (1 << MIDR_VARIANT_SHIFT) | 2),
	},
	{
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x02),
	},
	{
	/* Cavium ThunderX, T83 pass 1.0 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		MIDR_RANGE(MIDR_THUNDERX_83XX, 0x00, 0x00),
	},
#endif
	{
		.desc = "Mismatched cache line size",
		.capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
		.matches = has_mismatched_cache_line_size,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		MIDR_RANGE(MIDR_QCOM_FALKOR_V1,
			   MIDR_CPU_VAR_REV(0, 0),
			   MIDR_CPU_VAR_REV(0, 0)),
	},
	{
		.desc = "Qualcomm Technologies Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.midr_model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		.desc = "Qualcomm Technologies Falkor erratum 1009",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		MIDR_RANGE(MIDR_QCOM_FALKOR_V1,
			   MIDR_CPU_VAR_REV(0, 0),
			   MIDR_CPU_VAR_REV(0, 0)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
	/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
		.cpu_enable = enable_smccc_arch_workaround_1,
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
		.cpu_enable = enable_smccc_arch_workaround_1,
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
		.cpu_enable = enable_smccc_arch_workaround_1,
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
		.cpu_enable = enable_smccc_arch_workaround_1,
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
		.cpu_enable = qcom_enable_link_stack_sanitization,
	},
	{
		.capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
		MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
		.cpu_enable = qcom_enable_link_stack_sanitization,
	},
	{
		.capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
		MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
		.cpu_enable = enable_smccc_arch_workaround_1,
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
		.cpu_enable = enable_smccc_arch_workaround_1,
	},
#endif
	{
	}
};