// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/reset.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/hw_breakpoint.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

#include <kvm/arm_arch_timer.h>

#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/virt.h>

/* Maximum phys_shift supported for any VM on this host */
static u32 kvm_ipa_limit;

/*
 * ARMv8 Reset Values
 */
#define VCPU_RESET_PSTATE_EL1	(PSR_MODE_EL1h | PSR_A_BIT | PSR_I_BIT | \
				 PSR_F_BIT | PSR_D_BIT)

#define VCPU_RESET_PSTATE_SVC	(PSR_AA32_MODE_SVC | PSR_AA32_A_BIT | \
				 PSR_AA32_I_BIT | PSR_AA32_F_BIT)

unsigned int kvm_sve_max_vl;

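/*
 * Probe the maximum SVE vector length that can safely be exposed to
 * guests. This runs once during KVM's arch initialisation, before any
 * vCPU can be created.
 */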
int kvm_arm_init_sve(void)
{
	if (system_supports_sve()) {
		kvm_sve_max_vl = sve_max_virtualisable_vl();

		/*
		 * The get_sve_reg()/set_sve_reg() ioctl interface will need
		 * to be extended with multiple register slice support in
		 * order to support vector lengths greater than
		 * SVE_VL_ARCH_MAX:
		 */
		if (WARN_ON(kvm_sve_max_vl > SVE_VL_ARCH_MAX))
			kvm_sve_max_vl = SVE_VL_ARCH_MAX;

		/*
		 * Don't even try to make use of vector lengths that
		 * aren't available on all CPUs, for now:
		 */
		if (kvm_sve_max_vl < sve_max_vl())
			pr_warn("KVM: SVE vector length for guests limited to %u bytes\n",
				kvm_sve_max_vl);
	}

	return 0;
}

static int kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
{
	if (!system_supports_sve())
		return -EINVAL;

	vcpu->arch.sve_max_vl = kvm_sve_max_vl;

	/*
	 * Userspace can still customize the vector lengths by writing
	 * KVM_REG_ARM64_SVE_VLS.  Allocation is deferred until
	 * kvm_arm_vcpu_finalize(), which freezes the configuration.
	 */
	vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_SVE;

	return 0;
}

/*
 * Finalize vcpu's maximum SVE vector length, allocating
 * vcpu->arch.sve_state as necessary.
 */
static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
{
	void *buf;
	unsigned int vl;
	size_t reg_sz;
	int ret;

	vl = vcpu->arch.sve_max_vl;

	/*
	 * Responsibility for these properties is shared between
	 * kvm_arm_init_sve(), kvm_vcpu_enable_sve() and
	 * set_sve_vls().  Double-check here just to be sure:
	 */
	if (WARN_ON(!sve_vl_valid(vl) || vl > sve_max_virtualisable_vl() ||
		    vl > SVE_VL_ARCH_MAX))
		return -EIO;

	reg_sz = vcpu_sve_state_size(vcpu);
	buf = kzalloc(reg_sz, GFP_KERNEL_ACCOUNT);
	if (!buf)
		return -ENOMEM;

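	/*
	 * Share the buffer with EL2 so the hypervisor can access it when
	 * running in nVHE/protected mode.
	 */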
	ret = kvm_share_hyp(buf, buf + reg_sz);
	if (ret) {
		kfree(buf);
		return ret;
	}

	vcpu->arch.sve_state = buf;
	vcpu->arch.flags |= KVM_ARM64_VCPU_SVE_FINALIZED;
	return 0;
}

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
{
	switch (feature) {
	case KVM_ARM_VCPU_SVE:
		if (!vcpu_has_sve(vcpu))
			return -EINVAL;

		if (kvm_arm_vcpu_sve_finalized(vcpu))
			return -EPERM;

		return kvm_vcpu_finalize_sve(vcpu);
	}

	return -EINVAL;
}

bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_sve(vcpu) && !kvm_arm_vcpu_sve_finalized(vcpu))
		return false;

	return true;
}

void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	void *sve_state = vcpu->arch.sve_state;

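	/* Undo all sharing with EL2 before handing the memory back */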
	kvm_vcpu_unshare_task_fp(vcpu);
	kvm_unshare_hyp(vcpu, vcpu + 1);
	if (sve_state)
		kvm_unshare_hyp(sve_state, sve_state + vcpu_sve_state_size(vcpu));
	kfree(sve_state);
}

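/* Zero the vCPU's entire SVE register state, sized by its maximum VL */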
static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_sve(vcpu))
		memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu));
}

static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
{
	/*
	 * For now make sure that both address/generic pointer authentication
	 * features are requested by the userspace together and the system
	 * supports these capabilities.
	 */
	if (!test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
	    !test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features) ||
	    !system_has_full_ptr_auth())
		return -EINVAL;

	vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_PTRAUTH;
	return 0;
}

static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *tmp;
	bool is32bit;
	int i;

	is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
	if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1) && is32bit)
		return false;

	/* MTE is incompatible with AArch32 */
	if (kvm_has_mte(vcpu->kvm) && is32bit)
		return false;

	/* Check that the vcpus are either all 32bit or all 64bit */
	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
		if (vcpu_has_feature(tmp, KVM_ARM_VCPU_EL1_32BIT) != is32bit)
			return false;
	}

	return true;
}

/**
 * kvm_reset_vcpu - sets core registers and sys_regs to reset value
 * @vcpu: The VCPU pointer
 *
 * This function sets the registers on the virtual CPU struct to their
 * architecturally defined reset values, except for registers whose reset is
 * deferred until kvm_arm_vcpu_finalize().
 *
 * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT
 * ioctl or as part of handling a request issued by another VCPU in the PSCI
 * handling code.  In the first case, the VCPU will not be loaded, and in the
 * second case the VCPU will be loaded.  Because this function operates purely
 * on the memory-backed values of system registers, we want to do a full put if
 * we were loaded (handling a request) and load the values back at the end of
 * the function.  Otherwise we leave the state alone.  In both cases, we
 * disable preemption around the vcpu reset as we would otherwise race with
 * preempt notifiers which also call put/load.
 */
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_reset_state reset_state;
	int ret;
	bool loaded;
	u32 pstate;

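	/*
	 * Snapshot any PSCI-requested reset state and clear the pending
	 * flag within the same kvm->lock critical section, so a request
	 * made by another vCPU cannot be observed half-written.
	 */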
	mutex_lock(&vcpu->kvm->lock);
	reset_state = vcpu->arch.reset_state;
	WRITE_ONCE(vcpu->arch.reset_state.reset, false);
	mutex_unlock(&vcpu->kvm->lock);

	/* Reset PMU outside of the non-preemptible section */
	kvm_pmu_vcpu_reset(vcpu);

	preempt_disable();
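	/* vcpu->cpu is only valid (!= -1) while the vCPU is loaded */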
	loaded = (vcpu->cpu != -1);
	if (loaded)
		kvm_arch_vcpu_put(vcpu);

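	/*
	 * Two-phase SVE setup: enable the feature on the first reset (the
	 * configuration is not yet finalized), and only zero the SVE state
	 * on later resets once the vector lengths have been frozen.
	 */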
	if (!kvm_arm_vcpu_sve_finalized(vcpu)) {
		if (test_bit(KVM_ARM_VCPU_SVE, vcpu->arch.features)) {
			ret = kvm_vcpu_enable_sve(vcpu);
			if (ret)
				goto out;
		}
	} else {
		kvm_vcpu_reset_sve(vcpu);
	}

	if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
	    test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) {
		if (kvm_vcpu_enable_ptrauth(vcpu)) {
			ret = -EINVAL;
			goto out;
		}
	}

	if (!vcpu_allowed_register_width(vcpu)) {
		ret = -EINVAL;
		goto out;
	}

	switch (vcpu->arch.target) {
	default:
		if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
			pstate = VCPU_RESET_PSTATE_SVC;
		} else {
			pstate = VCPU_RESET_PSTATE_EL1;
		}

		if (kvm_vcpu_has_pmu(vcpu) && !kvm_arm_support_pmu_v3()) {
			ret = -EINVAL;
			goto out;
		}
		break;
	}

	/* Reset core registers */
	memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
	memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs));
	vcpu->arch.ctxt.spsr_abt = 0;
	vcpu->arch.ctxt.spsr_und = 0;
	vcpu->arch.ctxt.spsr_irq = 0;
	vcpu->arch.ctxt.spsr_fiq = 0;
	vcpu_gp_regs(vcpu)->pstate = pstate;

	/* Reset system registers */
	kvm_reset_sys_regs(vcpu);

	/*
	 * Additional reset state handling that PSCI may have imposed on us.
	 * Must be done after all the sys_reg reset.
	 */
	if (reset_state.reset) {
		unsigned long target_pc = reset_state.pc;

		/* Gracefully handle Thumb2 entry point */
		if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
			target_pc &= ~1UL;
			vcpu_set_thumb(vcpu);
		}

		/* Propagate caller endianness */
		if (reset_state.be)
			kvm_vcpu_set_be(vcpu);

		*vcpu_pc(vcpu) = target_pc;
		vcpu_set_reg(vcpu, 0, reset_state.r0);
	}

	/* Reset timer */
	ret = kvm_timer_vcpu_reset(vcpu);
out:
	if (loaded)
		kvm_arch_vcpu_load(vcpu, smp_processor_id());
	preempt_enable();
	return ret;
}

u32 get_kvm_ipa_limit(void)
{
	return kvm_ipa_limit;
}

int kvm_set_ipa_limit(void)
{
	unsigned int parange;
	u64 mmfr0;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	parange = cpuid_feature_extract_unsigned_field(mmfr0,
				ID_AA64MMFR0_PARANGE_SHIFT);
	/*
	 * An IPA size beyond 48 bits cannot be supported with either 4K
	 * or 16K pages, so cap it at 48 bits in case the system reports
	 * a larger value.
	 */
	if (PAGE_SIZE != SZ_64K)
		parange = min(parange, (unsigned int)ID_AA64MMFR0_PARANGE_48);

	/*
	 * Check with ARMv8.5-GTG that our PAGE_SIZE is supported at
	 * Stage-2. If not, things will stop very quickly.
	 */
	switch (cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_TGRAN_2_SHIFT)) {
	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE:
		kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
		return -EINVAL;
	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT:
		kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n");
		break;
	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX:
		kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n");
		break;
	default:
		kvm_err("Unsupported value for TGRAN_2, giving up\n");
		return -EINVAL;
	}

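	/* Convert the PARange field into an IPA size in bits */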
	kvm_ipa_limit = id_aa64mmfr0_parange_to_phys_shift(parange);
	kvm_info("IPA Size Limit: %d bits%s\n", kvm_ipa_limit,
		 ((kvm_ipa_limit < KVM_PHYS_SHIFT) ?
		  " (Reduced IPA size, limited VM/VMM compatibility)" : ""));

	return 0;
}

int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
{
	u64 mmfr0, mmfr1;
	u32 phys_shift;

	if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
		return -EINVAL;

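	/*
	 * Bits [7:0] of the VM type carry the requested IPA size; zero
	 * selects the legacy 40-bit default (KVM_PHYS_SHIFT).
	 */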
	phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type);
	if (phys_shift) {
		if (phys_shift > kvm_ipa_limit ||
		    phys_shift < ARM64_MIN_PARANGE_BITS)
			return -EINVAL;
	} else {
		phys_shift = KVM_PHYS_SHIFT;
		if (phys_shift > kvm_ipa_limit) {
			pr_warn_once("%s using unsupported default IPA limit, upgrade your VMM\n",
				     current->comm);
			return -EINVAL;
		}
	}

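	/* Derive the VTCR_EL2 value used for this VM's stage-2 tables */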
	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
	kvm->arch.vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift);

	return 0;
}