/*
 * Contains CPU feature definitions
 *
 * Copyright (C) 2015 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) "CPU features: " fmt

#include <linux/bsearch.h>
#include <linux/cpumask.h>
#include <linux/sort.h>
#include <linux/stop_machine.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/sysreg.h>
#include <asm/traps.h>
#include <asm/virt.h>

unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);

#ifdef CONFIG_COMPAT
#define COMPAT_ELF_HWCAP_DEFAULT	\
				(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
				 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
				 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
				 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
				 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
				 COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
#endif

DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
EXPORT_SYMBOL(cpu_hwcaps);

/*
 * Flag to indicate if we have computed the system wide
 * capabilities based on the boot time active CPUs. This
 * will be used to determine if a new booting CPU should
 * go through the verification process to make sure that it
 * supports the system capabilities, without using a hotplug
 * notifier.
 */
static bool sys_caps_initialised;

static inline void set_sys_caps_initialised(void)
{
	sys_caps_initialised = true;
}

static int dump_cpu_hwcaps(struct notifier_block *self, unsigned long v, void *p)
{
	/* file-wide pr_fmt adds "CPU features: " prefix */
	pr_emerg("0x%*pb\n", ARM64_NCAPS, &cpu_hwcaps);
	return 0;
}

static struct notifier_block cpu_hwcaps_notifier = {
	.notifier_call = dump_cpu_hwcaps
};

static int __init register_cpu_hwcaps_dumper(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &cpu_hwcaps_notifier);
	return 0;
}
__initcall(register_cpu_hwcaps_dumper);

DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
EXPORT_SYMBOL(cpu_hwcap_keys);

#define __ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
	{						\
		.sign = SIGNED,				\
		.visible = VISIBLE,			\
		.strict = STRICT,			\
		.type = TYPE,				\
		.shift = SHIFT,				\
		.width = WIDTH,				\
		.safe_val = SAFE_VAL,			\
	}

/* Define a feature with unsigned values */
#define ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
	__ARM64_FTR_BITS(FTR_UNSIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)

/* Define a feature with a signed value */
#define S_ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
	__ARM64_FTR_BITS(FTR_SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)

#define ARM64_FTR_END					\
	{						\
		.width = 0,				\
	}

/* meta feature for alternatives */
static bool __maybe_unused
cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused);


/*
 * NOTE: Any changes to the visibility of features should be kept in
 * sync with the documentation of the CPU feature register ABI.
 */
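/*
 * Each table entry below describes one field of the register. For example,
 * the AES entry
 *	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE,
 *		       ID_AA64ISAR0_AES_SHIFT, 4, 0)
 * is a visible, strictly checked, unsigned 4-bit field whose system-wide
 * value is the lowest value reported by any CPU.
 */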
static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM4_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
	S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
	S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
	/* Linux doesn't care about EL3 */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
	/* Linux shouldn't care about secure memory */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
	/*
	 * Differing PARange is fine as long as all peripherals and memory are mapped
	 * within the minimum PARange of all CPUs
	 */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_ctr[] = {
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1),	/* RAO */
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0),	/* CWG */
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),	/* ERG */
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1),	/* DminLine */
	/*
	 * Linux can handle differing I-cache policies. Userspace JITs will
	 * make use of *minLine.
	 * If we have differing I-cache policies, report it as the weakest - VIPT.
	 */
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_VIPT),	/* L1Ip */
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* IminLine */
	ARM64_FTR_END,
};

struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = {
	.name		= "SYS_CTR_EL0",
	.ftr_bits	= ftr_ctr
};

static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0xf),	/* InnerShr */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),	/* FCSE */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0),	/* AuxReg */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),	/* TCM */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),	/* ShareLvl */
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0xf),	/* OuterShr */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),	/* PMSA */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* VMSA */
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 36, 28, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_PMSVER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
	/*
	 * We can instantiate multiple PMU instances with different levels
	 * of support.
	 */
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_mvfr2[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),		/* FPMisc */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),		/* SIMDMisc */
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_dczid[] = {
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 4, 1, 1),		/* DZP */
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* BS */
	ARM64_FTR_END,
};


static const struct arm64_ftr_bits ftr_id_isar5[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_RDM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_CRC32_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA1_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_AES_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SEVL_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_mmfr4[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),	/* ac2 */
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_pfr0[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),		/* State3 */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),		/* State2 */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),		/* State1 */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),		/* State0 */
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_dfr0[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf),	/* PerfMon */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_zcr[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE,
		ZCR_ELx_LEN_SHIFT, ZCR_ELx_LEN_SIZE, 0),	/* LEN */
	ARM64_FTR_END,
};

/*
 * Common ftr bits for a 32bit register with all hidden, strict
 * attributes, with 4bit feature fields and a default safe value of
 * 0. Covers the following 32bit registers:
 * id_isar[0-4], id_mmfr[1-3], id_pfr1, mvfr[0-1]
 */
static const struct arm64_ftr_bits ftr_generic_32bits[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
	ARM64_FTR_END,
};

/* Table for a single 32bit feature value */
static const struct arm64_ftr_bits ftr_single32[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 32, 0),
	ARM64_FTR_END,
};

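/*
 * An empty description table: no bits are covered, so the whole register
 * is treated as RES0 in the sanitised system-wide view.
 */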
static const struct arm64_ftr_bits ftr_raz[] = {
	ARM64_FTR_END,
};

#define ARM64_FTR_REG(id, table) {		\
	.sys_id = id,				\
	.reg = 	&(struct arm64_ftr_reg){	\
		.name = #id,			\
		.ftr_bits = &((table)[0]),	\
	}}

static const struct __ftr_reg_entry {
	u32			sys_id;
	struct arm64_ftr_reg 	*reg;
} arm64_ftr_regs[] = {

	/* Op1 = 0, CRn = 0, CRm = 1 */
	ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0),
	ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0),
	ARM64_FTR_REG(SYS_ID_MMFR0_EL1, ftr_id_mmfr0),
	ARM64_FTR_REG(SYS_ID_MMFR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_MMFR2_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_MMFR3_EL1, ftr_generic_32bits),

	/* Op1 = 0, CRn = 0, CRm = 2 */
	ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR2_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR3_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5),
	ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),

	/* Op1 = 0, CRn = 0, CRm = 3 */
	ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2),

	/* Op1 = 0, CRn = 0, CRm = 4 */
	ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
	ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_raz),
	ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_raz),

	/* Op1 = 0, CRn = 0, CRm = 5 */
	ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
	ARM64_FTR_REG(SYS_ID_AA64DFR1_EL1, ftr_raz),

	/* Op1 = 0, CRn = 0, CRm = 6 */
	ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
	ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1),

	/* Op1 = 0, CRn = 0, CRm = 7 */
	ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
	ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),
	ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),

	/* Op1 = 0, CRn = 1, CRm = 2 */
	ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr),

	/* Op1 = 3, CRn = 0, CRm = 0 */
	{ SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 },
	ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),

	/* Op1 = 3, CRn = 14, CRm = 0 */
	ARM64_FTR_REG(SYS_CNTFRQ_EL0, ftr_single32),
};

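/*
 * bsearch() comparator: the key is the sys_reg() encoding being looked up
 * and 'regp' points at a candidate arm64_ftr_regs[] entry; the sign of the
 * result steers the search towards the matching sys_id.
 */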
static int search_cmp_ftr_reg(const void *id, const void *regp)
{
	return (int)(unsigned long)id - (int)((const struct __ftr_reg_entry *)regp)->sys_id;
}

/*
 * get_arm64_ftr_reg - Look up a feature register entry using its
 * sys_reg() encoding. With the array arm64_ftr_regs sorted in
 * ascending order of sys_id, we use binary search to find a matching
 * entry.
 *
 * returns - Upon success, the matching ftr_reg entry for id.
 *         - NULL on failure. It is up to the caller to decide
 *	     the impact of a failure.
 */
static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
{
	const struct __ftr_reg_entry *ret;

	ret = bsearch((const void *)(unsigned long)sys_id,
			arm64_ftr_regs,
			ARRAY_SIZE(arm64_ftr_regs),
			sizeof(arm64_ftr_regs[0]),
			search_cmp_ftr_reg);
	if (ret)
		return ret->reg;
	return NULL;
}

static u64 arm64_ftr_set_value(const struct arm64_ftr_bits *ftrp, s64 reg,
			       s64 ftr_val)
{
	u64 mask = arm64_ftr_mask(ftrp);

	reg &= ~mask;
	reg |= (ftr_val << ftrp->shift) & mask;
	return reg;
}

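/*
 * Combine two values of the same feature field into the value that is safe
 * to advertise system-wide: the fixed safe_val for FTR_EXACT fields,
 * otherwise the lower (FTR_LOWER_SAFE) or higher (FTR_HIGHER_SAFE) of the
 * two values.
 */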
static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
				s64 cur)
{
	s64 ret = 0;

	switch (ftrp->type) {
	case FTR_EXACT:
		ret = ftrp->safe_val;
		break;
	case FTR_LOWER_SAFE:
		ret = new < cur ? new : cur;
		break;
	case FTR_HIGHER_SAFE:
		ret = new > cur ? new : cur;
		break;
	default:
		BUG();
	}

	return ret;
}

static void __init sort_ftr_regs(void)
{
	int i;

	/* Check that the array is sorted so that we can do the binary search */
	for (i = 1; i < ARRAY_SIZE(arm64_ftr_regs); i++)
		BUG_ON(arm64_ftr_regs[i].sys_id < arm64_ftr_regs[i - 1].sys_id);
}

/*
 * Initialise the CPU feature register from the boot CPU values.
 * Also initialises the strict_mask for the register.
 * Any bits that are not covered by an arm64_ftr_bits entry are considered
 * RES0 for the system-wide value, and must strictly match.
 */
static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
{
	u64 val = 0;
	u64 strict_mask = ~0x0ULL;
	u64 user_mask = 0;
	u64 valid_mask = 0;

	const struct arm64_ftr_bits *ftrp;
	struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg);

	BUG_ON(!reg);

	for (ftrp  = reg->ftr_bits; ftrp->width; ftrp++) {
		u64 ftr_mask = arm64_ftr_mask(ftrp);
		s64 ftr_new = arm64_ftr_value(ftrp, new);

		val = arm64_ftr_set_value(ftrp, val, ftr_new);

		valid_mask |= ftr_mask;
		if (!ftrp->strict)
			strict_mask &= ~ftr_mask;
		if (ftrp->visible)
			user_mask |= ftr_mask;
		else
			reg->user_val = arm64_ftr_set_value(ftrp,
							    reg->user_val,
							    ftrp->safe_val);
	}

	val &= valid_mask;

	reg->sys_val = val;
	reg->strict_mask = strict_mask;
	reg->user_mask = user_mask;
}

void __init init_cpu_features(struct cpuinfo_arm64 *info)
{
	/* Before we start using the tables, make sure it is sorted */
	sort_ftr_regs();

	init_cpu_ftr_reg(SYS_CTR_EL0, info->reg_ctr);
	init_cpu_ftr_reg(SYS_DCZID_EL0, info->reg_dczid);
	init_cpu_ftr_reg(SYS_CNTFRQ_EL0, info->reg_cntfrq);
	init_cpu_ftr_reg(SYS_ID_AA64DFR0_EL1, info->reg_id_aa64dfr0);
	init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
	init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
	init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
	init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
	init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
	init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0);

	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
		init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
		init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
		init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
		init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
		init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
		init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
		init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
		init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
		init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
		init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
		init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
		init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
		init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
		init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
		init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
		init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
	}

	if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
		init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
		sve_init_vq_map();
	}
}

static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
{
	const struct arm64_ftr_bits *ftrp;

	for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
		s64 ftr_cur = arm64_ftr_value(ftrp, reg->sys_val);
		s64 ftr_new = arm64_ftr_value(ftrp, new);

		if (ftr_cur == ftr_new)
			continue;
		/* Find a safe value */
		ftr_new = arm64_ftr_safe_value(ftrp, ftr_new, ftr_cur);
		reg->sys_val = arm64_ftr_set_value(ftrp, reg->sys_val, ftr_new);
	}

}

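/*
 * Fold a secondary CPU's register value into the system-wide view and
 * flag (via a non-zero return value) any mismatch against the boot CPU
 * in the strictly checked fields.
 */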
static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot)
{
	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);

	BUG_ON(!regp);
	update_cpu_ftr_reg(regp, val);
	if ((boot & regp->strict_mask) == (val & regp->strict_mask))
		return 0;
	pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n",
			regp->name, boot, cpu, val);
	return 1;
}

/*
 * Update system wide CPU feature registers with the values from a
 * non-boot CPU. Also performs SANITY checks to make sure that there
 * aren't any insane variations from that of the boot CPU.
 */
void update_cpu_features(int cpu,
			 struct cpuinfo_arm64 *info,
			 struct cpuinfo_arm64 *boot)
{
	int taint = 0;

	/*
	 * The kernel can handle differing I-cache policies, but otherwise
	 * caches should look identical. Userspace JITs will make use of
	 * *minLine.
	 */
	taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu,
				      info->reg_ctr, boot->reg_ctr);

	/*
	 * Userspace may perform DC ZVA instructions. Mismatched block sizes
	 * could result in too much or too little memory being zeroed if a
	 * process is preempted and migrated between CPUs.
	 */
	taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu,
				      info->reg_dczid, boot->reg_dczid);

	/* If different, timekeeping will be broken (especially with KVM) */
	taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu,
				      info->reg_cntfrq, boot->reg_cntfrq);

	/*
	 * The kernel uses self-hosted debug features and expects CPUs to
	 * support identical debug features. We presently need CTX_CMPs, WRPs,
	 * and BRPs to be identical.
	 * ID_AA64DFR1 is currently RES0.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu,
				      info->reg_id_aa64dfr0, boot->reg_id_aa64dfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu,
				      info->reg_id_aa64dfr1, boot->reg_id_aa64dfr1);
	/*
	 * Even in big.LITTLE, processors should be identical instruction-set
	 * wise.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu,
				      info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
				      info->reg_id_aa64isar1, boot->reg_id_aa64isar1);

	/*
	 * Differing PARange support is fine as long as all peripherals and
	 * memory are mapped within the minimum PARange of all CPUs.
	 * Linux should not care about secure memory.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu,
				      info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
				      info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
				      info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);

	/*
	 * EL3 is not our concern.
	 * ID_AA64PFR1 is currently RES0.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
				      info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
				      info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1);

	taint |= check_update_ftr_reg(SYS_ID_AA64ZFR0_EL1, cpu,
				      info->reg_id_aa64zfr0, boot->reg_id_aa64zfr0);

	/*
	 * If we have AArch32, we care about 32-bit features for compat.
	 * If the system doesn't support AArch32, don't update them.
	 */
	if (id_aa64pfr0_32bit_el0(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
		id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {

		taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
					info->reg_id_dfr0, boot->reg_id_dfr0);
		taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
					info->reg_id_isar0, boot->reg_id_isar0);
		taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
					info->reg_id_isar1, boot->reg_id_isar1);
		taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
					info->reg_id_isar2, boot->reg_id_isar2);
		taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
					info->reg_id_isar3, boot->reg_id_isar3);
		taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
					info->reg_id_isar4, boot->reg_id_isar4);
		taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
					info->reg_id_isar5, boot->reg_id_isar5);

		/*
		 * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
		 * ACTLR formats could differ across CPUs and therefore would have to
		 * be trapped for virtualization anyway.
		 */
		taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
					info->reg_id_mmfr0, boot->reg_id_mmfr0);
		taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
					info->reg_id_mmfr1, boot->reg_id_mmfr1);
		taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
					info->reg_id_mmfr2, boot->reg_id_mmfr2);
		taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
					info->reg_id_mmfr3, boot->reg_id_mmfr3);
		taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
					info->reg_id_pfr0, boot->reg_id_pfr0);
		taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
					info->reg_id_pfr1, boot->reg_id_pfr1);
		taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
					info->reg_mvfr0, boot->reg_mvfr0);
		taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
					info->reg_mvfr1, boot->reg_mvfr1);
		taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
					info->reg_mvfr2, boot->reg_mvfr2);
	}

	if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
		taint |= check_update_ftr_reg(SYS_ZCR_EL1, cpu,
					info->reg_zcr, boot->reg_zcr);

		/* Probe vector lengths, unless we already gave up on SVE */
		if (id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
		    !sys_caps_initialised)
			sve_update_vq_map();
	}

	/*
	 * Mismatched CPU features are a recipe for disaster. Don't even
	 * pretend to support them.
	 */
	if (taint) {
		pr_warn_once("Unsupported CPU feature variation detected.\n");
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
	}
}

u64 read_sanitised_ftr_reg(u32 id)
{
	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id);

	/* We shouldn't get a request for an unsupported register */
	BUG_ON(!regp);
	return regp->sys_val;
}

#define read_sysreg_case(r)	\
	case r:		return read_sysreg_s(r)

/*
 * __read_sysreg_by_encoding() - Used by a STARTING cpu before cpuinfo is populated.
 * Read the system register on the current CPU
 */
static u64 __read_sysreg_by_encoding(u32 sys_id)
{
	switch (sys_id) {
	read_sysreg_case(SYS_ID_PFR0_EL1);
	read_sysreg_case(SYS_ID_PFR1_EL1);
	read_sysreg_case(SYS_ID_DFR0_EL1);
	read_sysreg_case(SYS_ID_MMFR0_EL1);
	read_sysreg_case(SYS_ID_MMFR1_EL1);
	read_sysreg_case(SYS_ID_MMFR2_EL1);
	read_sysreg_case(SYS_ID_MMFR3_EL1);
	read_sysreg_case(SYS_ID_ISAR0_EL1);
	read_sysreg_case(SYS_ID_ISAR1_EL1);
	read_sysreg_case(SYS_ID_ISAR2_EL1);
	read_sysreg_case(SYS_ID_ISAR3_EL1);
	read_sysreg_case(SYS_ID_ISAR4_EL1);
	read_sysreg_case(SYS_ID_ISAR5_EL1);
	read_sysreg_case(SYS_MVFR0_EL1);
	read_sysreg_case(SYS_MVFR1_EL1);
	read_sysreg_case(SYS_MVFR2_EL1);

	read_sysreg_case(SYS_ID_AA64PFR0_EL1);
	read_sysreg_case(SYS_ID_AA64PFR1_EL1);
	read_sysreg_case(SYS_ID_AA64DFR0_EL1);
	read_sysreg_case(SYS_ID_AA64DFR1_EL1);
	read_sysreg_case(SYS_ID_AA64MMFR0_EL1);
	read_sysreg_case(SYS_ID_AA64MMFR1_EL1);
	read_sysreg_case(SYS_ID_AA64MMFR2_EL1);
	read_sysreg_case(SYS_ID_AA64ISAR0_EL1);
	read_sysreg_case(SYS_ID_AA64ISAR1_EL1);

	read_sysreg_case(SYS_CNTFRQ_EL0);
	read_sysreg_case(SYS_CTR_EL0);
	read_sysreg_case(SYS_DCZID_EL0);

	default:
		BUG();
		return 0;
	}
}

#include <linux/irqchip/arm-gic-v3.h>

static bool
feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
{
	int val = cpuid_feature_extract_field(reg, entry->field_pos, entry->sign);

	return val >= entry->min_field_value;
}

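/*
 * Check a CPUID-based capability either against the sanitised system-wide
 * register view (SCOPE_SYSTEM) or against this CPU's own register value
 * (SCOPE_LOCAL_CPU). The latter is only meaningful while the caller cannot
 * migrate, hence the WARN_ON() below.
 */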
static bool
has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
{
	u64 val;

	WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
	if (scope == SCOPE_SYSTEM)
		val = read_sanitised_ftr_reg(entry->sys_reg);
	else
		val = __read_sysreg_by_encoding(entry->sys_reg);

	return feature_matches(val, entry);
}

static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry, int scope)
{
	bool has_sre;

	if (!has_cpuid_feature(entry, scope))
		return false;

	has_sre = gic_enable_sre();
	if (!has_sre)
		pr_warn_once("%s present but disabled by higher exception level\n",
			     entry->desc);

	return has_sre;
}

static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int __unused)
{
	u32 midr = read_cpuid_id();

	/* Cavium ThunderX pass 1.x and 2.x */
	return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX,
		MIDR_CPU_VAR_REV(0, 0),
		MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
}

static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
{
	return is_kernel_in_hyp_mode();
}

static bool hyp_offset_low(const struct arm64_cpu_capabilities *entry,
			   int __unused)
{
	phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);

	/*
	 * Activate the lower HYP offset only if:
	 * - the idmap doesn't clash with it,
	 * - the kernel is not running at EL2.
	 */
	return idmap_addr > GENMASK(VA_BITS - 2, 0) && !is_kernel_in_hyp_mode();
}

static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unused)
{
	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	return cpuid_feature_extract_signed_field(pfr0,
					ID_AA64PFR0_FP_SHIFT) < 0;
}

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */

static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
				int __unused)
{
	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	/* Forced on command line? */
	if (__kpti_forced) {
		pr_info_once("kernel page table isolation forced %s by command line option\n",
			     __kpti_forced > 0 ? "ON" : "OFF");
		return __kpti_forced > 0;
	}

	/* Useful for KASLR robustness */
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
		return true;

	/* Defer to CPU feature registers */
	return !cpuid_feature_extract_unsigned_field(pfr0,
						     ID_AA64PFR0_CSV3_SHIFT);
}

static int __init parse_kpti(char *str)
{
	bool enabled;
	int ret = strtobool(str, &enabled);

	if (ret)
		return ret;

	__kpti_forced = enabled ? 1 : -1;
	return 0;
}
__setup("kpti=", parse_kpti);
#endif	/* CONFIG_UNMAP_KERNEL_AT_EL0 */

static int cpu_copy_el2regs(void *__unused)
{
	/*
	 * Copy register values that aren't redirected by hardware.
	 *
	 * Before code patching, we only set tpidr_el1, all CPUs need to copy
	 * this value to tpidr_el2 before we patch the code. Once we've done
	 * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to
	 * do anything here.
	 */
	if (!alternatives_applied)
		write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);

	return 0;
}

static const struct arm64_cpu_capabilities arm64_features[] = {
	{
		.desc = "GIC system register CPU interface",
		.capability = ARM64_HAS_SYSREG_GIC_CPUIF,
		.def_scope = SCOPE_SYSTEM,
		.matches = has_useable_gicv3_cpuif,
		.sys_reg = SYS_ID_AA64PFR0_EL1,
		.field_pos = ID_AA64PFR0_GIC_SHIFT,
		.sign = FTR_UNSIGNED,
		.min_field_value = 1,
	},
#ifdef CONFIG_ARM64_PAN
	{
		.desc = "Privileged Access Never",
		.capability = ARM64_HAS_PAN,
		.def_scope = SCOPE_SYSTEM,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64MMFR1_EL1,
		.field_pos = ID_AA64MMFR1_PAN_SHIFT,
		.sign = FTR_UNSIGNED,
		.min_field_value = 1,
		.enable = cpu_enable_pan,
	},
#endif /* CONFIG_ARM64_PAN */
#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
	{
		.desc = "LSE atomic instructions",
		.capability = ARM64_HAS_LSE_ATOMICS,
		.def_scope = SCOPE_SYSTEM,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64ISAR0_EL1,
		.field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
		.sign = FTR_UNSIGNED,
		.min_field_value = 2,
	},
#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
	{
		.desc = "Software prefetching using PRFM",
		.capability = ARM64_HAS_NO_HW_PREFETCH,
		.def_scope = SCOPE_SYSTEM,
		.matches = has_no_hw_prefetch,
	},
#ifdef CONFIG_ARM64_UAO
	{
		.desc = "User Access Override",
		.capability = ARM64_HAS_UAO,
		.def_scope = SCOPE_SYSTEM,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64MMFR2_EL1,
		.field_pos = ID_AA64MMFR2_UAO_SHIFT,
		.min_field_value = 1,
		/*
		 * We rely on stop_machine() calling uao_thread_switch() to set
		 * UAO immediately after patching.
		 */
	},
#endif /* CONFIG_ARM64_UAO */
#ifdef CONFIG_ARM64_PAN
	{
		.capability = ARM64_ALT_PAN_NOT_UAO,
		.def_scope = SCOPE_SYSTEM,
		.matches = cpufeature_pan_not_uao,
	},
#endif /* CONFIG_ARM64_PAN */
	{
		.desc = "Virtualization Host Extensions",
		.capability = ARM64_HAS_VIRT_HOST_EXTN,
		.def_scope = SCOPE_SYSTEM,
		.matches = runs_at_el2,
		.enable = cpu_copy_el2regs,
	},
	{
		.desc = "32-bit EL0 Support",
		.capability = ARM64_HAS_32BIT_EL0,
		.def_scope = SCOPE_SYSTEM,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64PFR0_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64PFR0_EL0_SHIFT,
		.min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
	},
	{
		.desc = "Reduced HYP mapping offset",
		.capability = ARM64_HYP_OFFSET_LOW,
		.def_scope = SCOPE_SYSTEM,
		.matches = hyp_offset_low,
	},
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	{
		.desc = "Kernel page table isolation (KPTI)",
		.capability = ARM64_UNMAP_KERNEL_AT_EL0,
		.def_scope = SCOPE_SYSTEM,
		.matches = unmap_kernel_at_el0,
	},
#endif
	{
		/* FP/SIMD is not implemented */
		.capability = ARM64_HAS_NO_FPSIMD,
		.def_scope = SCOPE_SYSTEM,
		.min_field_value = 0,
		.matches = has_no_fpsimd,
	},
#ifdef CONFIG_ARM64_PMEM
	{
		.desc = "Data cache clean to Point of Persistence",
		.capability = ARM64_HAS_DCPOP,
		.def_scope = SCOPE_SYSTEM,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64ISAR1_EL1,
		.field_pos = ID_AA64ISAR1_DPB_SHIFT,
		.min_field_value = 1,
	},
#endif
#ifdef CONFIG_ARM64_SVE
	{
		.desc = "Scalable Vector Extension",
		.capability = ARM64_SVE,
		.def_scope = SCOPE_SYSTEM,
		.sys_reg = SYS_ID_AA64PFR0_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64PFR0_SVE_SHIFT,
		.min_field_value = ID_AA64PFR0_SVE,
		.matches = has_cpuid_feature,
		.enable = sve_kernel_enable,
	},
#endif /* CONFIG_ARM64_SVE */
	{},
};

#define HWCAP_CAP(reg, field, s, min_value, type, cap)	\
	{							\
		.desc = #cap,					\
		.def_scope = SCOPE_SYSTEM,			\
		.matches = has_cpuid_feature,			\
		.sys_reg = reg,					\
		.field_pos = field,				\
		.sign = s,					\
		.min_field_value = min_value,			\
		.hwcap_type = type,				\
		.hwcap = cap,					\
	}

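/*
 * For example, the HWCAP_AES entry below sets HWCAP_AES in the ELF
 * auxiliary vector once every CPU reports an AES field value of at
 * least 1 in ID_AA64ISAR0_EL1.
 */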
static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_PMULL),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA1),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA2),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_SHA512),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_CRC32),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ATOMICS),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDRDM),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA3),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM3),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM4),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDDP),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDFHM),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_ASIMDHP),
	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_DCPOP),
	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_JSCVT),
	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA),
	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC),
#ifdef CONFIG_ARM64_SVE
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE),
#endif
	{},
};

static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
#ifdef CONFIG_COMPAT
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
#endif
	{},
};

static void __init cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
{
	switch (cap->hwcap_type) {
	case CAP_HWCAP:
		elf_hwcap |= cap->hwcap;
		break;
#ifdef CONFIG_COMPAT
	case CAP_COMPAT_HWCAP:
		compat_elf_hwcap |= (u32)cap->hwcap;
		break;
	case CAP_COMPAT_HWCAP2:
		compat_elf_hwcap2 |= (u32)cap->hwcap;
		break;
#endif
	default:
		WARN_ON(1);
		break;
	}
}

/* Check if we have a particular HWCAP enabled */
static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap)
{
	bool rc;

	switch (cap->hwcap_type) {
	case CAP_HWCAP:
		rc = (elf_hwcap & cap->hwcap) != 0;
		break;
#ifdef CONFIG_COMPAT
	case CAP_COMPAT_HWCAP:
		rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0;
		break;
	case CAP_COMPAT_HWCAP2:
		rc = (compat_elf_hwcap2 & (u32)cap->hwcap) != 0;
		break;
#endif
	default:
		WARN_ON(1);
		rc = false;
	}

	return rc;
}

static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
{
	/* We support emulation of accesses to CPU ID feature registers */
	elf_hwcap |= HWCAP_CPUID;
	for (; hwcaps->matches; hwcaps++)
		if (hwcaps->matches(hwcaps, hwcaps->def_scope))
			cap_set_elf_hwcap(hwcaps);
}

/*
 * Check if the current CPU has a given feature capability.
 * Should be called from non-preemptible context.
 */
static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
			       unsigned int cap)
{
	const struct arm64_cpu_capabilities *caps;

	if (WARN_ON(preemptible()))
		return false;

	for (caps = cap_array; caps->desc; caps++)
		if (caps->capability == cap &&
		    caps->matches &&
		    caps->matches(caps, SCOPE_LOCAL_CPU))
			return true;
	return false;
}

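/*
 * Scan a capability table and mark each capability whose matches() check
 * passes for its default scope, logging newly detected ones.
 */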
void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
			    const char *info)
{
	for (; caps->matches; caps++) {
		if (!caps->matches(caps, caps->def_scope))
			continue;

		if (!cpus_have_cap(caps->capability) && caps->desc)
			pr_info("%s %s\n", info, caps->desc);
		cpus_set_cap(caps->capability);
	}
}

/*
 * Run through the enabled capabilities and call enable() for each one
 * on all active CPUs.
 */
void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
{
	for (; caps->matches; caps++) {
		unsigned int num = caps->capability;

		if (!cpus_have_cap(num))
			continue;

		/* Ensure cpus_have_const_cap(num) works */
		static_branch_enable(&cpu_hwcap_keys[num]);

		if (caps->enable) {
			/*
			 * Use stop_machine() as it schedules the work allowing
			 * us to modify PSTATE, instead of on_each_cpu() which
			 * uses an IPI, giving us a PSTATE that disappears when
			 * we return.
			 */
			stop_machine(caps->enable, (void *)caps, cpu_online_mask);
		}
	}
}

/*
 * Check for CPU features that are used in early boot
 * based on the Boot CPU value.
 */
static void check_early_cpu_features(void)
{
	verify_cpu_run_el();
	verify_cpu_asid_bits();
}

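/*
 * A late CPU must still provide every ELF hwcap already advertised to
 * userspace; re-run the matches() checks locally and park the CPU if
 * any of them now fails.
 */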
static void
verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
{
	for (; caps->matches; caps++)
		if (cpus_have_elf_hwcap(caps) && !caps->matches(caps, SCOPE_LOCAL_CPU)) {
			pr_crit("CPU%d: missing HWCAP: %s\n",
					smp_processor_id(), caps->desc);
			cpu_die_early();
		}
}

static void
verify_local_cpu_features(const struct arm64_cpu_capabilities *caps_list)
{
	const struct arm64_cpu_capabilities *caps = caps_list;
	for (; caps->matches; caps++) {
		if (!cpus_have_cap(caps->capability))
			continue;
		/*
		 * If the new CPU misses an advertised feature, we cannot proceed
		 * further, park the cpu.
		 */
		if (!__this_cpu_has_cap(caps_list, caps->capability)) {
			pr_crit("CPU%d: missing feature: %s\n",
					smp_processor_id(), caps->desc);
			cpu_die_early();
		}
		if (caps->enable)
			caps->enable((void *)caps);
	}
}

static void verify_sve_features(void)
{
	u64 safe_zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
	u64 zcr = read_zcr_features();

	unsigned int safe_len = safe_zcr & ZCR_ELx_LEN_MASK;
	unsigned int len = zcr & ZCR_ELx_LEN_MASK;

	if (len < safe_len || sve_verify_vq_map()) {
		pr_crit("CPU%d: SVE: required vector length(s) missing\n",
			smp_processor_id());
		cpu_die_early();
	}

	/* Add checks on other ZCR bits here if necessary */
}

/*
 * Run through the enabled system capabilities and call enable() for each one
 * on this CPU. The capabilities were decided based on the available CPUs at
 * boot time. Any new CPU should match the system-wide status of the
 * capability. If the new CPU doesn't have a capability which the system now
 * has enabled, we cannot do anything to fix it up and could cause unexpected
 * failures. So we park the CPU.
 */
static void verify_local_cpu_capabilities(void)
{
	verify_local_cpu_errata_workarounds();
	verify_local_cpu_features(arm64_features);
	verify_local_elf_hwcaps(arm64_elf_hwcaps);

	if (system_supports_32bit_el0())
		verify_local_elf_hwcaps(compat_elf_hwcaps);

	if (system_supports_sve())
		verify_sve_features();

	if (system_uses_ttbr0_pan())
		pr_info("Emulating Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
}

void check_local_cpu_capabilities(void)
{
	/*
	 * All secondary CPUs should conform to the early CPU features
	 * in use by the kernel based on boot CPU.
	 */
	check_early_cpu_features();

	/*
	 * If we haven't finalised the system capabilities, this CPU gets
	 * a chance to update the errata workarounds.
	 * Otherwise, this CPU should verify that it has all the system
	 * advertised capabilities.
	 */
	if (!sys_caps_initialised)
		update_cpu_errata_workarounds();
	else
		verify_local_cpu_capabilities();
}

static void __init setup_feature_capabilities(void)
{
	update_cpu_capabilities(arm64_features, "detected feature:");
	enable_cpu_capabilities(arm64_features);
}

DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
EXPORT_SYMBOL(arm64_const_caps_ready);

static void __init mark_const_caps_ready(void)
{
	static_branch_enable(&arm64_const_caps_ready);
}

extern const struct arm64_cpu_capabilities arm64_errata[];

bool this_cpu_has_cap(unsigned int cap)
{
	return (__this_cpu_has_cap(arm64_features, cap) ||
		__this_cpu_has_cap(arm64_errata, cap));
}

void __init setup_cpu_features(void)
{
	u32 cwg;
	int cls;

	/* Set the CPU feature capabilities */
	setup_feature_capabilities();
	enable_errata_workarounds();
	mark_const_caps_ready();
	setup_elf_hwcaps(arm64_elf_hwcaps);

	if (system_supports_32bit_el0())
		setup_elf_hwcaps(compat_elf_hwcaps);

	sve_setup();

	/* Advertise that we have computed the system capabilities */
	set_sys_caps_initialised();

	/*
	 * Check for sane CTR_EL0.CWG value.
	 */
	cwg = cache_type_cwg();
	cls = cache_line_size();
	if (!cwg)
		pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
			cls);
	if (L1_CACHE_BYTES < cls)
		pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
			L1_CACHE_BYTES, cls);
}

static bool __maybe_unused
cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
{
	return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO));
}

/*
 * We emulate only the following system register space.
 * Op0 = 0x3, CRn = 0x0, Op1 = 0x0, CRm = [0, 4 - 7]
 * See Table C5-6 System instruction encodings for System register accesses,
 * ARMv8 ARM(ARM DDI 0487A.f) for more details.
 */
static inline bool __attribute_const__ is_emulated(u32 id)
{
	return (sys_reg_Op0(id) == 0x3 &&
		sys_reg_CRn(id) == 0x0 &&
		sys_reg_Op1(id) == 0x0 &&
		(sys_reg_CRm(id) == 0 ||
		 ((sys_reg_CRm(id) >= 4) && (sys_reg_CRm(id) <= 7))));
}

/*
 * With CRm == 0, reg should be one of :
 * MIDR_EL1, MPIDR_EL1 or REVIDR_EL1.
 */
static inline int emulate_id_reg(u32 id, u64 *valp)
{
	switch (id) {
	case SYS_MIDR_EL1:
		*valp = read_cpuid_id();
		break;
	case SYS_MPIDR_EL1:
		*valp = SYS_MPIDR_SAFE_VAL;
		break;
	case SYS_REVIDR_EL1:
		/* IMPLEMENTATION DEFINED values are emulated with 0 */
		*valp = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int emulate_sys_reg(u32 id, u64 *valp)
{
	struct arm64_ftr_reg *regp;

	if (!is_emulated(id))
		return -EINVAL;

	if (sys_reg_CRm(id) == 0)
		return emulate_id_reg(id, valp);

	regp = get_arm64_ftr_reg(id);
	if (regp)
		*valp = arm64_ftr_reg_user_value(regp);
	else
		/*
		 * The untracked registers are either IMPLEMENTATION DEFINED
		 * (e.g, ID_AFR0_EL1) or reserved RAZ.
		 */
		*valp = 0;
	return 0;
}

static int emulate_mrs(struct pt_regs *regs, u32 insn)
{
	int rc;
	u32 sys_reg, dst;
	u64 val;

	/*
	 * sys_reg values are defined as used in mrs/msr instruction.
	 * shift the imm value to get the encoding.
	 */
	sys_reg = (u32)aarch64_insn_decode_immediate(AARCH64_INSN_IMM_16, insn) << 5;
	rc = emulate_sys_reg(sys_reg, &val);
	if (!rc) {
		dst = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
		pt_regs_write_reg(regs, dst, val);
		arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	}

	return rc;
}

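/*
 * Hook EL0 undefined-instruction faults whose encoding matches MRS
 * (top twelve bits 0xd53), so that userspace reads of the ID register
 * space are emulated instead of raising SIGILL.
 */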
static struct undef_hook mrs_hook = {
	.instr_mask = 0xfff00000,
	.instr_val  = 0xd5300000,
	.pstate_mask = COMPAT_PSR_MODE_MASK,
	.pstate_val = PSR_MODE_EL0t,
	.fn = emulate_mrs,
};

static int __init enable_mrs_emulation(void)
{
	register_undef_hook(&mrs_hook);
	return 0;
}

core_initcall(enable_mrs_emulation);