/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/arm-smccc.h>
#include <linux/psci.h>
#include <linux/types.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>

/*
 * Check whether this CPU's MIDR falls within the erratum's affected
 * range, screening out revisions whose REVIDR advertises a fix.
 */
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	const struct arm64_midr_revidr *fix;
	u32 midr = read_cpuid_id(), revidr;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	if (!is_midr_in_range(midr, &entry->midr_range))
		return false;

	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
	revidr = read_cpuid(REVIDR_EL1);
	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
			return false;

	return true;
}

static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}

static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/*
	 * Strip the variant, revision and the low byte of the part
	 * number before comparing against the Kryo model.
	 */
	model = read_cpuid_id();
	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK;

	return model == entry->midr_range.model;
}

static bool
has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	u64 mask = CTR_CACHE_MINLINE_MASK;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return (read_cpuid_cachetype() & mask) !=
	       (arm64_ftr_reg_ctrel0.sys_val & mask);
}

static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
{
	/* Clear SCTLR_EL1.UCT so that EL0 accesses to CTR_EL0 trap to EL1 */
	config_sctlr_el1(SCTLR_EL1_UCT, 0);
}

/* Index of the most recently allocated EL2 vector slot; -1 = none yet */
atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

#ifdef CONFIG_KVM_INDIRECT_VECTORS
extern char __smccc_workaround_1_smc_start[];
extern char __smccc_workaround_1_smc_end[];

static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
	int i;

	/* Each of the 16 vectors in the 2K slot is 0x80 bytes long */
	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}

static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
				      const char *hyp_vecs_start,
				      const char *hyp_vecs_end)
{
	static DEFINE_SPINLOCK(bp_lock);
	int cpu, slot = -1;

	spin_lock(&bp_lock);
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	spin_unlock(&bp_lock);
}
#else
#define __smccc_workaround_1_smc_start		NULL
#define __smccc_workaround_1_smc_end		NULL

static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
				      const char *hyp_vecs_start,
				      const char *hyp_vecs_end)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}
#endif	/* CONFIG_KVM_INDIRECT_VECTORS */

static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
				    bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	u64 pfr0;

	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
		return;

	/* CPUs advertising CSV2 are not vulnerable and need no callback */
	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
		return;

	__install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
}

#include <uapi/linux/psci.h>

static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void qcom_link_stack_sanitization(void)
{
	u64 tmp;

	/*
	 * Overwrite the CPU's return-stack predictor with benign entries:
	 * save x30, execute 16 branch-and-links to the next instruction,
	 * then restore x30.
	 */
	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}

static void
enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
{
	bp_hardening_cb_t cb;
	void *smccc_start, *smccc_end;
	struct arm_smccc_res res;
	u32 midr = read_cpuid_id();

	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
		return;

	/* ARCH_WORKAROUND_1 discovery needs the SMCCC v1.1 fast-call ABI */
	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
		return;

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		if ((int)res.a0 < 0)
			return;
		cb = call_hvc_arch_workaround_1;
		/* This is a guest, no need to patch KVM vectors */
		smccc_start = NULL;
		smccc_end = NULL;
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		if ((int)res.a0 < 0)
			return;
		cb = call_smc_arch_workaround_1;
		smccc_start = __smccc_workaround_1_smc_start;
		smccc_end = __smccc_workaround_1_smc_end;
		break;

	default:
		return;
	}

	/* Falkor parts use link-stack sanitization instead of the firmware call */
	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
		cb = qcom_link_stack_sanitization;

	install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
}
#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */

#ifdef CONFIG_ARM64_SSBD
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;

static const struct ssbd_options {
	const char	*str;
	int		state;
} ssbd_options[] = {
	{ "force-on",	ARM64_SSBD_FORCE_ENABLE, },
	{ "force-off",	ARM64_SSBD_FORCE_DISABLE, },
	{ "kernel",	ARM64_SSBD_KERNEL, },
};

static int __init ssbd_cfg(char *buf)
{
	int i;

	if (!buf || !buf[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
		int len = strlen(ssbd_options[i].str);

		if (strncmp(buf, ssbd_options[i].str, len))
			continue;

		ssbd_state = ssbd_options[i].state;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", ssbd_cfg);
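
/*
 * Example: booting with "ssbd=force-off" on the kernel command line
 * selects ARM64_SSBD_FORCE_DISABLE; "ssbd=kernel" keeps the default
 * ARM64_SSBD_KERNEL (per-task) behaviour.
 */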

/*
 * Alternatives callback: patch the SMCCC call site with an HVC or SMC
 * instruction to match the firmware conduit in use.
 */
void __init arm64_update_smccc_conduit(struct alt_instr *alt,
				       __le32 *origptr, __le32 *updptr,
				       int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1);

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case PSCI_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}
void __init arm64_enable_wa2_handling(struct alt_instr *alt,
				      __le32 *origptr, __le32 *updptr,
				      int nr_inst)
{
	BUG_ON(nr_inst != 1);
	/*
	 * Only allow mitigation on EL1 entry/exit and guest
	 * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
	 * be flipped.
	 */
	if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}
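
/*
 * The instruction patched above is a branch over the dynamic mitigation
 * code; turning it into a NOP (only in the ARM64_SSBD_KERNEL state)
 * lets the entry code consult the per-CPU arm64_ssbd_callback_required
 * flag and issue ARCH_WORKAROUND_2 calls as needed.
 */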

void arm64_set_ssbd_mitigation(bool state)
{
	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}
}

static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
				    int scope)
{
	struct arm_smccc_res res;
	bool required = true;
	s32 val;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
		ssbd_state = ARM64_SSBD_UNKNOWN;
		return false;
	}

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	default:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		return false;
	}

	val = (s32)res.a0;

	switch (val) {
	case SMCCC_RET_NOT_SUPPORTED:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		return false;

	case SMCCC_RET_NOT_REQUIRED:
		pr_info_once("%s mitigation not required\n", entry->desc);
		ssbd_state = ARM64_SSBD_MITIGATED;
		return false;

	case SMCCC_RET_SUCCESS:
		required = true;
		break;

	case 1:	/* Mitigation not required on this CPU */
		required = false;
		break;

	default:
		WARN_ON(1);
		return false;
	}

	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		pr_info_once("%s disabled from command-line\n", entry->desc);
		arm64_set_ssbd_mitigation(false);
		required = false;
		break;

	case ARM64_SSBD_KERNEL:
		if (required) {
			__this_cpu_write(arm64_ssbd_callback_required, 1);
			arm64_set_ssbd_mitigation(true);
		}
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		pr_info_once("%s forced from command-line\n", entry->desc);
		arm64_set_ssbd_mitigation(true);
		required = true;
		break;

	default:
		WARN_ON(1);
		break;
	}

	return required;
}
#endif	/* CONFIG_ARM64_SSBD */

#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)					\
	.matches = is_affected_midr_range,				\
	.midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask) \
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}
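
/*
 * Example: the erratum 843419 entry below uses MIDR_FIXED(0x4, BIT(8)),
 * so Cortex-A53 r0p4 parts with REVIDR_EL1[8] set are treated as fixed.
 */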

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)		\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,				\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)	 \
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)	\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)				\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE_LIST(midr_list)
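
/*
 * For example, ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus)
 * expands to the .type, .matches and .midr_range_list initialisers
 * used by the branch-predictor hardening entry below.
 */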

/*
 * Generic helper for handling capabilities with multiple (match, enable)
 * pairs of callbacks, sharing the same capability bit.
 * Iterate over each entry to see if at least one matches.
 */
static bool __maybe_unused
multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry, int scope)
{
	const struct arm64_cpu_capabilities *caps;

	for (caps = entry->match_list; caps->matches; caps++)
		if (caps->matches(caps, scope))
			return true;

	return false;
}
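
/*
 * Note: the scan stops at the first entry whose ->matches is NULL, so
 * every match_list must be terminated by an empty entry.
 */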

/*
 * Take appropriate action for all matching entries in the shared capability
 * entry.
 */
static void __maybe_unused
multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry)
{
	const struct arm64_cpu_capabilities *caps;

	for (caps = entry->match_list; caps->matches; caps++)
		if (caps->matches(caps, SCOPE_LOCAL_CPU) &&
		    caps->cpu_enable)
			caps->cpu_enable(caps);
}

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR

/*
 * List of CPUs where we need to issue a PSCI/SMCCC firmware call to
 * harden the branch predictor.
 */
static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
	MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
	MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
	MIDR_ALL_VERSIONS(MIDR_NVIDIA_DENVER),
	{},
};

#endif
#ifdef CONFIG_HARDEN_EL2_VECTORS

static const struct midr_range arm64_harden_el2_vectors[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};

#endif

const struct arm64_cpu_capabilities arm64_errata[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	{
	/* Cortex-A53 r0p[012] */
		.desc = "ARM errata 826319, 827319, 824069",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
	{
	/* Cortex-A53 r0p[01] */
		.desc = "ARM erratum 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
	{
	/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 843419",
		.capability = ARM64_WORKAROUND_843419,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
		MIDR_FIXED(0x4, BIT(8)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
	/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
	/* Cavium ThunderX, pass 1.x */
		.desc = "Cavium erratum 23154",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE(MIDR_THUNDERX,
				  0, 0,
				  1, 1),
	},
	{
	/* Cavium ThunderX, T81 pass 1.0 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_RANGE(MIDR_THUNDERX,
				  0, 0,
				  1, 2),
	},
	{
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	},
	{
	/* Cavium ThunderX, T83 pass 1.0 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	},
#endif
	{
		.desc = "Mismatched cache line size",
		.capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
		.matches = has_mismatched_cache_line_size,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.desc = "Qualcomm Technologies Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		.desc = "Qualcomm Technologies Falkor erratum 1009",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
	/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = enable_smccc_arch_workaround_1,
		ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
	},
#endif
#ifdef CONFIG_HARDEN_EL2_VECTORS
	{
		.desc = "EL2 vector hardening",
		.capability = ARM64_HARDEN_EL2_VECTORS,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
	},
#endif
#ifdef CONFIG_ARM64_SSBD
	{
		.desc = "Speculative Store Bypass Disable",
		.capability = ARM64_SSBD,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_ssbd_mitigation,
	},
#endif
	{
	}
};