/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/types.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>

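/*
 * A CPU is affected if its MIDR falls in the entry's range and no
 * fixed_revs entry matches: each fixed_revs element names a variant/
 * revision plus a REVIDR bit that indicates the erratum was fixed.
 */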
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	const struct arm64_midr_revidr *fix;
	u32 midr = read_cpuid_id(), revidr;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	if (!is_midr_in_range(midr, &entry->midr_range))
		return false;

	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
	revidr = read_cpuid(REVIDR_EL1);
	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
			return false;

	return true;
}

static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}

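/*
 * Kryo parts encode their exact model in MIDR_EL1, so compare only the
 * implementer, architecture and top nibble of the part number.
 */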
static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	model = read_cpuid_id();
	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK;

	return model == entry->midr_range.model;
}

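/*
 * A CPU's cache line size is mismatched if the strict CTR_EL0 fields it
 * reports differ from the system-wide safe value established at boot.
 */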
static bool
has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return (read_cpuid_cachetype() & arm64_ftr_reg_ctrel0.strict_mask) !=
		(arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
}

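/*
 * Clearing SCTLR_EL1.UCT traps EL0 reads of CTR_EL0 to EL1, letting
 * the kernel hand userspace the system-wide safe cache type value.
 */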
static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
{
	/* Clear SCTLR_EL1.UCT */
	config_sctlr_el1(SCTLR_EL1_UCT, 0);
}

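/* Last EL2 vector slot allocated; -1 until the first slot is taken. */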
atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

#ifdef CONFIG_KVM_INDIRECT_VECTORS
extern char __smccc_workaround_1_smc_start[];
extern char __smccc_workaround_1_smc_end[];

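/*
 * Copy the workaround sequence into each 0x80-byte vector entry of a
 * 2K hyp vector slot, then flush the I-cache for the whole slot.
 */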
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
	int i;

	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}

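/*
 * Point this CPU at a hardening callback, reusing an already-populated
 * EL2 vector slot when another CPU registered the same callback.
 */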
static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
				      const char *hyp_vecs_start,
				      const char *hyp_vecs_end)
{
	static DEFINE_SPINLOCK(bp_lock);
	int cpu, slot = -1;

	spin_lock(&bp_lock);
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	spin_unlock(&bp_lock);
}
#else
#define __smccc_workaround_1_smc_start		NULL
#define __smccc_workaround_1_smc_end		NULL

static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
				      const char *hyp_vecs_start,
				      const char *hyp_vecs_end)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}
#endif	/* CONFIG_KVM_INDIRECT_VECTORS */

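/*
 * Install the callback only on CPUs that match the erratum entry and
 * do not advertise CSV2 (i.e. are not already immune by design).
 */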
static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
				     bp_hardening_cb_t fn,
				     const char *hyp_vecs_start,
				     const char *hyp_vecs_end)
{
	u64 pfr0;

	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
		return;

	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
		return;

	__install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
}

#include <uapi/linux/psci.h>
#include <linux/arm-smccc.h>
#include <linux/psci.h>

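/*
 * Thin wrappers around the ARM_SMCCC_ARCH_WORKAROUND_1 firmware call,
 * which invalidates the branch predictor, issued over SMC or HVC
 * depending on the conduit in use.
 */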
static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

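/*
 * Overwrite the branch predictor's link (return address) stack with
 * benign entries: sixteen bl instructions that each just fall through,
 * with the real x30 saved and restored around them.
 */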
static void qcom_link_stack_sanitization(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}

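/*
 * Probe for ARM_SMCCC_ARCH_WORKAROUND_1 via the PSCI conduit (SMCCC
 * v1.1 or later required) and register the matching callback; Falkor
 * parts use link stack sanitization instead of the firmware call.
 */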
static void
enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
{
	bp_hardening_cb_t cb;
	void *smccc_start, *smccc_end;
	struct arm_smccc_res res;
	u32 midr = read_cpuid_id();

	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
		return;

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
		return;

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		if ((int)res.a0 < 0)
			return;
		cb = call_hvc_arch_workaround_1;
		/* This is a guest, no need to patch KVM vectors */
		smccc_start = NULL;
		smccc_end = NULL;
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		if ((int)res.a0 < 0)
			return;
		cb = call_smc_arch_workaround_1;
		smccc_start = __smccc_workaround_1_smc_start;
		smccc_end = __smccc_workaround_1_smc_end;
		break;

	default:
		return;
	}

	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
		cb = qcom_link_stack_sanitization;

	install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);

	return;
}
#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */

#ifdef CONFIG_ARM64_SSBD
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

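/*
 * The mitigation defaults to being dynamically toggled by the kernel;
 * "ssbd=force-on", "ssbd=force-off" or "ssbd=kernel" on the command
 * line override this via ssbd_cfg() below.
 */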
int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;

static const struct ssbd_options {
	const char	*str;
	int		state;
} ssbd_options[] = {
	{ "force-on",	ARM64_SSBD_FORCE_ENABLE, },
	{ "force-off",	ARM64_SSBD_FORCE_DISABLE, },
	{ "kernel",	ARM64_SSBD_KERNEL, },
};

static int __init ssbd_cfg(char *buf)
{
	int i;

	if (!buf || !buf[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
		int len = strlen(ssbd_options[i].str);

		if (strncmp(buf, ssbd_options[i].str, len))
			continue;

		ssbd_state = ssbd_options[i].state;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", ssbd_cfg);

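/*
 * Alternative-instruction callback: rewrite the single placeholder
 * instruction as an HVC or SMC to match the conduit PSCI uses; if
 * there is no conduit, the original instruction is left untouched.
 */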
void __init arm64_update_smccc_conduit(struct alt_instr *alt,
				       __le32 *origptr, __le32 *updptr,
				       int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1);

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case PSCI_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}

void __init arm64_enable_wa2_handling(struct alt_instr *alt,
				      __le32 *origptr, __le32 *updptr,
				      int nr_inst)
{
	BUG_ON(nr_inst != 1);
	/*
	 * Only allow mitigation on EL1 entry/exit and guest
	 * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
	 * be flipped.
	 */
	if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

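/*
 * Flip the mitigation state on the local CPU through the firmware's
 * ARCH_WORKAROUND_2 call, using whichever conduit PSCI provides.
 */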
void arm64_set_ssbd_mitigation(bool state)
{
	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}
}

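/*
 * Query firmware for ARCH_WORKAROUND_2 support, reconcile the answer
 * with any ssbd= command-line override, and report whether the dynamic
 * mitigation is required on this CPU.
 */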
static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	struct arm_smccc_res res;
	bool required = true;
	s32 val;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
		ssbd_state = ARM64_SSBD_UNKNOWN;
		return false;
	}

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	default:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		return false;
	}

	val = (s32)res.a0;

	switch (val) {
	case SMCCC_RET_NOT_SUPPORTED:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		return false;

	case SMCCC_RET_NOT_REQUIRED:
		pr_info_once("%s mitigation not required\n", entry->desc);
		ssbd_state = ARM64_SSBD_MITIGATED;
		return false;

	case SMCCC_RET_SUCCESS:
		required = true;
		break;

	case 1:	/* Mitigation not required on this CPU */
		required = false;
		break;

	default:
		WARN_ON(1);
		return false;
	}

	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		pr_info_once("%s disabled from command-line\n", entry->desc);
		arm64_set_ssbd_mitigation(false);
		required = false;
		break;

	case ARM64_SSBD_KERNEL:
		if (required) {
			__this_cpu_write(arm64_ssbd_callback_required, 1);
			arm64_set_ssbd_mitigation(true);
		}
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		pr_info_once("%s forced from command-line\n", entry->desc);
		arm64_set_ssbd_mitigation(true);
		required = true;
		break;

	default:
		WARN_ON(1);
		break;
	}

	return required;
}
#endif	/* CONFIG_ARM64_SSBD */

#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)					\
	.matches = is_affected_midr_range,				\
	.midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask) \
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)		\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,				\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)	 \
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)	\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)				\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE_LIST(midr_list)

/*
 * Generic helper for handling capabilities with multiple (match, enable)
 * pairs of callbacks, sharing the same capability bit.
 * Iterate over each entry to see if at least one matches.
 */
static bool __maybe_unused
multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry, int scope)
{
	const struct arm64_cpu_capabilities *caps;

	for (caps = entry->match_list; caps->matches; caps++)
		if (caps->matches(caps, scope))
			return true;

	return false;
}

/*
 * Take appropriate action for all matching entries in the shared capability
 * entry.
 */
static void __maybe_unused
multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry)
{
	const struct arm64_cpu_capabilities *caps;

	for (caps = entry->match_list; caps->matches; caps++)
		if (caps->matches(caps, SCOPE_LOCAL_CPU) &&
		    caps->cpu_enable)
			caps->cpu_enable(caps);
}

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR

/*
 * List of CPUs where we need to issue a psci call to
 * harden the branch predictor.
 */
static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
	MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
	MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
	{},
};

#endif

#ifdef CONFIG_HARDEN_EL2_VECTORS

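/*
 * CPUs whose EL2 vectors need hardening (ARM64_HARDEN_EL2_VECTORS).
 */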
static const struct midr_range arm64_harden_el2_vectors[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};

#endif

const struct arm64_cpu_capabilities arm64_errata[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	{
	/* Cortex-A53 r0p[012] */
		.desc = "ARM errata 826319, 827319, 824069",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
	{
	/* Cortex-A53 r0p[01] */
		.desc = "ARM errata 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
	{
	/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 843419",
		.capability = ARM64_WORKAROUND_843419,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
		MIDR_FIXED(0x4, BIT(8)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
	/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
	/* Cavium ThunderX, pass 1.x */
		.desc = "Cavium erratum 23154",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE(MIDR_THUNDERX,
				  0, 0,
				  1, 1),
	},
	{
	/* Cavium ThunderX, T81 pass 1.0 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_RANGE(MIDR_THUNDERX,
				  0, 0,
				  1, 2),
	},
	{
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	},
	{
	/* Cavium ThunderX, T83 pass 1.0 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	},
#endif
	{
		.desc = "Mismatched cache line size",
		.capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
		.matches = has_mismatched_cache_line_size,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.desc = "Qualcomm Technologies Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		.desc = "Qualcomm Technologies Falkor erratum 1009",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
	/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = enable_smccc_arch_workaround_1,
		ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
	},
#endif
#ifdef CONFIG_HARDEN_EL2_VECTORS
	{
		.desc = "EL2 vector hardening",
		.capability = ARM64_HARDEN_EL2_VECTORS,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
	},
#endif
#ifdef CONFIG_ARM64_SSBD
	{
		.desc = "Speculative Store Bypass Disable",
		.capability = ARM64_SSBD,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_ssbd_mitigation,
	},
#endif
	{
	}
};