/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>

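/*
 * FP/SIMD state is live on the CPU exactly while guest accesses are
 * untrapped: CPTR_EL2.TFP clear on non-VHE, CPACR_EL1.FPEN set on VHE.
 */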
static bool __hyp_text __fpsimd_enabled_nvhe(void)
{
	return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP);
}

static bool fpsimd_enabled_vhe(void)
{
	return !!(read_sysreg(cpacr_el1) & CPACR_EL1_FPEN);
}

/* Save the 32-bit only FPSIMD system register state */
static void __hyp_text __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
{
	if (!vcpu_el1_is_32bit(vcpu))
		return;

	vcpu->arch.ctxt.sys_regs[FPEXC32_EL2] = read_sysreg(fpexc32_el2);
}

static void __hyp_text __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
{
	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2, however, the ARM ARM clearly states that
	 * traps are only taken to EL2 if the operation would not otherwise
	 * trap to EL1.  Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
	 * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
	 * it will cause an exception.
	 */
	if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd()) {
		write_sysreg(1 << 30, fpexc32_el2);
		isb();
	}
}

static void __hyp_text __activate_traps_common(struct kvm_vcpu *vcpu)
{
	/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
	write_sysreg(1 << 15, hstr_el2);

	/*
	 * Make sure we trap PMU access from EL0 to EL2. Also sanitize
	 * PMSELR_EL0 to make sure it never contains the cycle
	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
	 * EL1 instead of being trapped to EL2.
	 */
	write_sysreg(0, pmselr_el0);
	write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
}

static void __hyp_text __deactivate_traps_common(void)
{
	write_sysreg(0, hstr_el2);
	write_sysreg(0, pmuserenr_el0);
}

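/*
 * On VHE, trap trace register accesses (CPACR_EL1.TTA) and disable guest
 * FP/SIMD and SVE accesses (FPEN/ZEN cleared) so that the first such
 * access traps into __hyp_switch_fpsimd() for a lazy state switch.
 */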
static void activate_traps_vhe(struct kvm_vcpu *vcpu)
{
	u64 val;

	val = read_sysreg(cpacr_el1);
	val |= CPACR_EL1_TTA;
	val &= ~(CPACR_EL1_FPEN | CPACR_EL1_ZEN);
	write_sysreg(val, cpacr_el1);

	write_sysreg(kvm_get_hyp_vector(), vbar_el1);
}

static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
{
	u64 val;

	__activate_traps_common(vcpu);

	val = CPTR_EL2_DEFAULT;
	val |= CPTR_EL2_TTA | CPTR_EL2_TFP | CPTR_EL2_TZ;
	write_sysreg(val, cptr_el2);
}

static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
{
	u64 hcr = vcpu->arch.hcr_el2;

	write_sysreg(hcr, hcr_el2);

	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
		write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);

	__activate_traps_fpsimd32(vcpu);
	if (has_vhe())
		activate_traps_vhe(vcpu);
	else
		__activate_traps_nvhe(vcpu);
}

static void deactivate_traps_vhe(void)
{
	extern char vectors[];	/* kernel exception vectors */
	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
	write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
	write_sysreg(vectors, vbar_el1);
}

static void __hyp_text __deactivate_traps_nvhe(void)
{
	u64 mdcr_el2 = read_sysreg(mdcr_el2);

	__deactivate_traps_common();

	mdcr_el2 &= MDCR_EL2_HPMN_MASK;
	/* E2PB = 0b11: give the profiling buffer back to the EL1 host */
	mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;

	write_sysreg(mdcr_el2, mdcr_el2);
	write_sysreg(HCR_RW, hcr_el2);
	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
}

static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See D1.14.3 (Virtual Interrupts) for details, but
	 * the crucial bit is "On taking a vSError interrupt,
	 * HCR_EL2.VSE is cleared to 0."
	 */
	if (vcpu->arch.hcr_el2 & HCR_VSE)
		vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);

	if (has_vhe())
		deactivate_traps_vhe();
	else
		__deactivate_traps_nvhe();
}

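/*
 * On VHE, the common trap state can be context-switched when the vcpu
 * is loaded and put, rather than on every guest entry and exit.
 */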
void activate_traps_vhe_load(struct kvm_vcpu *vcpu)
{
	__activate_traps_common(vcpu);
}

void deactivate_traps_vhe_put(void)
{
	u64 mdcr_el2 = read_sysreg(mdcr_el2);

	mdcr_el2 &= MDCR_EL2_HPMN_MASK |
		    MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT |
		    MDCR_EL2_TPMS;

	write_sysreg(mdcr_el2, mdcr_el2);

	__deactivate_traps_common();
}

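/*
 * VTTBR_EL2 carries both the VMID and the stage 2 page table base; a
 * non-zero VTTBR is also how hyp_panic() detects that a guest context
 * is currently loaded.
 */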
static void __hyp_text __activate_vm(struct kvm *kvm)
{
	write_sysreg(kvm->arch.vttbr, vttbr_el2);
}

static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
{
	write_sysreg(0, vttbr_el2);
}

/* Save VGICv3 state on non-VHE systems */
static void __hyp_text __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_save_state(vcpu);
		__vgic_v3_deactivate_traps(vcpu);
	}
}

/* Restore VGICv3 state on non-VHE systems */
static void __hyp_text __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_activate_traps(vcpu);
		__vgic_v3_restore_state(vcpu);
	}
}

static bool __hyp_text __true_value(void)
{
	return true;
}

static bool __hyp_text __false_value(void)
{
	return false;
}

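/*
 * Patched at boot via the alternatives framework: __check_arm_834220()()
 * resolves to __true_value only on CPUs affected by Cortex-A57 erratum
 * 834220.
 */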
static hyp_alternate_select(__check_arm_834220,
			    __false_value, __true_value,
			    ARM64_WORKAROUND_834220);

static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
	u64 par, tmp;

	/*
	 * Resolve the IPA the hard way using the guest VA.
	 *
	 * Stage-1 translation already validated the memory access
	 * rights. As such, we can use the EL1 translation regime, and
	 * don't have to distinguish between EL0 and EL1 access.
	 *
	 * We do need to save/restore PAR_EL1 though, as we haven't
	 * saved the guest context yet, and we may return early...
	 */
	par = read_sysreg(par_el1);
	asm volatile("at s1e1r, %0" : : "r" (far));
	isb();

	tmp = read_sysreg(par_el1);
	write_sysreg(par, par_el1);

	if (unlikely(tmp & 1))
		return false; /* Translation failed, back to guest */

	/*
	 * Convert PAR to HPFAR format: PA[47:12] from PAR_EL1 moves to
	 * HPFAR_EL2.FIPA, i.e. bits [39:4] of HPFAR_EL2.
	 */
	*hpfar = ((tmp >> 12) & ((1UL << 36) - 1)) << 4;
	return true;
}

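/*
 * Stash FAR/HPFAR in the vcpu's fault state so that the host, which
 * cannot read the EL2 fault registers directly on non-VHE, can decode
 * the abort later.
 */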
static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
	u8 ec;
	u64 esr;
	u64 hpfar, far;

	esr = vcpu->arch.fault.esr_el2;
	ec = ESR_ELx_EC(esr);

	if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
		return true;

	far = read_sysreg_el2(far);

	/*
	 * The HPFAR can be invalid if the stage 2 fault did not
	 * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
	 * bit is clear) and one of the two following cases is true:
	 *   1. The fault was due to a permission fault
	 *   2. The processor is affected by erratum 834220
	 *
	 * Therefore, for all non-S1PTW faults where we either have a
	 * permission fault or the erratum workaround is enabled, we
	 * resolve the IPA using the AT instruction.
	 */
	if (!(esr & ESR_ELx_S1PTW) &&
	    (__check_arm_834220()() || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
		if (!__translate_far_to_hpfar(far, &hpfar))
			return false;
	} else {
		hpfar = read_sysreg(hpfar_el2);
	}

	vcpu->arch.fault.far_el2 = far;
	vcpu->arch.fault.hpfar_el2 = hpfar;
	return true;
}

/*
 * Skip an instruction which has been emulated. Returns true if
 * execution can continue or false if we need to exit hyp mode because
 * single-step was in effect.
 */
static bool __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
{
	*vcpu_pc(vcpu) = read_sysreg_el2(elr);

	if (vcpu_mode_is_32bit(vcpu)) {
		vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(spsr);
		kvm_skip_instr32(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
		write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, spsr);
	} else {
		*vcpu_pc(vcpu) += 4;
	}

	write_sysreg_el2(*vcpu_pc(vcpu), elr);

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
		/* Fake a software step exception: ISS.IFSC = 0b100010 (debug) */
		vcpu->arch.fault.esr_el2 =
			(ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT) | 0x22;
		return false;
	} else {
		return true;
	}
}

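/*
 * Handle a trapped guest FP/SIMD access: re-enable FP/SIMD (undoing the
 * traps set in __activate_traps()), save the host's FP/SIMD registers
 * and load the guest's, then return to replay the faulting instruction.
 */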
void __hyp_text __hyp_switch_fpsimd(u64 esr __always_unused,
				    struct kvm_vcpu *vcpu)
{
	kvm_cpu_context_t *host_ctxt;

	if (has_vhe())
		write_sysreg(read_sysreg(cpacr_el1) | CPACR_EL1_FPEN,
			     cpacr_el1);
	else
		write_sysreg(read_sysreg(cptr_el2) & ~(u64)CPTR_EL2_TFP,
			     cptr_el2);

	isb();

	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
	__fpsimd_save_state(&host_ctxt->gp_regs.fp_regs);
	__fpsimd_restore_state(&vcpu->arch.ctxt.gp_regs.fp_regs);

	/* Skip restoring fpexc32 for AArch64 guests */
	if (!(read_sysreg(hcr_el2) & HCR_RW))
		write_sysreg(vcpu->arch.ctxt.sys_regs[FPEXC32_EL2],
			     fpexc32_el2);
}

/*
 * Return true when we were able to fixup the guest exit and should return to
 * the guest, false when we should restore the host state and return to the
 * main run loop.
 */
static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
		vcpu->arch.fault.esr_el2 = read_sysreg_el2(esr);

	/*
	 * We're using the raw exception code in order to only process
	 * the trap if no SError is pending. We will come back to the
	 * same PC once the SError has been injected, and replay the
	 * trapping instruction.
	 */
	if (*exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
		return true;

	if (static_branch_unlikely(&vgic_v2_cpuif_trap) &&
	    *exit_code == ARM_EXCEPTION_TRAP) {
		bool valid;

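		/*
		 * Only emulate the access if it is a stage 2 translation
		 * fault on a data access with a valid syndrome, and neither
		 * an external abort nor a stage 1 page table walk.
		 */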
		valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
			kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
			kvm_vcpu_dabt_isvalid(vcpu) &&
			!kvm_vcpu_dabt_isextabt(vcpu) &&
			!kvm_vcpu_dabt_iss1tw(vcpu);

		if (valid) {
			int ret = __vgic_v2_perform_cpuif_access(vcpu);

			if (ret == 1) {
				if (__skip_instr(vcpu))
					return true;
				else
					*exit_code = ARM_EXCEPTION_TRAP;
			}

			if (ret == -1) {
				/*
				 * Promote an illegal access to an SError.
				 * If we would be returning due to
				 * single-step, clear the SS bit so
				 * handle_exit knows what to do after
				 * dealing with the error.
				 */
				if (!__skip_instr(vcpu))
					*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
				*exit_code = ARM_EXCEPTION_EL1_SERROR;
			}
		}
	}

	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    *exit_code == ARM_EXCEPTION_TRAP &&
	    (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 ||
	     kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_CP15_32)) {
		int ret = __vgic_v3_perform_cpuif_access(vcpu);

		if (ret == 1) {
			if (__skip_instr(vcpu))
				return true;
			else
				*exit_code = ARM_EXCEPTION_TRAP;
		}
	}

	/* Return to the host kernel and handle the exit */
	return false;
}

/* Switch to the guest for VHE systems running in EL2 */
int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	bool fp_enabled;
	u64 exit_code;

	host_ctxt = vcpu->arch.host_cpu_context;
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	sysreg_save_host_state_vhe(host_ctxt);

	__activate_traps(vcpu);
	__activate_vm(vcpu->kvm);

	sysreg_restore_guest_state_vhe(guest_ctxt);
	__debug_switch_to_guest(vcpu);

	do {
		/* Jump in the fire! */
		exit_code = __guest_enter(vcpu, host_ctxt);

		/* And we're baaack! */
	} while (fixup_guest_exit(vcpu, &exit_code));

	fp_enabled = fpsimd_enabled_vhe();

	sysreg_save_guest_state_vhe(guest_ctxt);

	__deactivate_traps(vcpu);

	sysreg_restore_host_state_vhe(host_ctxt);

	if (fp_enabled) {
		__fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs);
		__fpsimd_restore_state(&host_ctxt->gp_regs.fp_regs);
		__fpsimd_save_fpexc32(vcpu);
	}

	__debug_switch_to_host(vcpu);

	return exit_code;
}

/* Switch to the guest for legacy non-VHE systems */
int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	bool fp_enabled;
	u64 exit_code;

	vcpu = kern_hyp_va(vcpu);

	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	__sysreg_save_state_nvhe(host_ctxt);

	__activate_traps(vcpu);
	__activate_vm(kern_hyp_va(vcpu->kvm));

	__hyp_vgic_restore_state(vcpu);
	__timer_enable_traps(vcpu);

	/*
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_state_nvhe(guest_ctxt);
	__debug_switch_to_guest(vcpu);

	do {
		/* Jump in the fire! */
		exit_code = __guest_enter(vcpu, host_ctxt);

		/* And we're baaack! */
	} while (fixup_guest_exit(vcpu, &exit_code));

	fp_enabled = __fpsimd_enabled_nvhe();

	__sysreg_save_state_nvhe(guest_ctxt);
	__sysreg32_save_state(vcpu);
	__timer_disable_traps(vcpu);
	__hyp_vgic_save_state(vcpu);

	__deactivate_traps(vcpu);
	__deactivate_vm(vcpu);

	__sysreg_restore_state_nvhe(host_ctxt);

	if (fp_enabled) {
		__fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs);
		__fpsimd_restore_state(&host_ctxt->gp_regs.fp_regs);
		__fpsimd_save_fpexc32(vcpu);
	}

	/*
	 * This must come after restoring the host sysregs, since a non-VHE
	 * system may enable SPE here and make use of the TTBRs.
	 */
	__debug_switch_to_host(vcpu);

	return exit_code;
}

static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";

static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par,
					     struct kvm_cpu_context *__host_ctxt)
{
	struct kvm_vcpu *vcpu;
	unsigned long str_va;

	vcpu = __host_ctxt->__hyp_running_vcpu;

	if (read_sysreg(vttbr_el2)) {
		__timer_disable_traps(vcpu);
		__deactivate_traps(vcpu);
		__deactivate_vm(vcpu);
		__sysreg_restore_state_nvhe(__host_ctxt);
	}

	/*
	 * Force the panic string to be loaded from the literal pool,
	 * making sure it is a kernel address and not a PC-relative
	 * reference.
	 */
	asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va));

	__hyp_do_panic(str_va,
		       spsr,  elr,
		       read_sysreg(esr_el2),   read_sysreg_el2(far),
		       read_sysreg(hpfar_el2), par, vcpu);
}

static void __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
				 struct kvm_cpu_context *host_ctxt)
{
	struct kvm_vcpu *vcpu;
	vcpu = host_ctxt->__hyp_running_vcpu;

	__deactivate_traps(vcpu);
	sysreg_restore_host_state_vhe(host_ctxt);

	panic(__hyp_panic_string,
	      spsr,  elr,
	      read_sysreg_el2(esr),   read_sysreg_el2(far),
	      read_sysreg(hpfar_el2), par, vcpu);
}

void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
{
	u64 spsr = read_sysreg_el2(spsr);
	u64 elr = read_sysreg_el2(elr);
	u64 par = read_sysreg(par_el1);

	if (!has_vhe())
		__hyp_call_panic_nvhe(spsr, elr, par, host_ctxt);
	else
		__hyp_call_panic_vhe(spsr, elr, par, host_ctxt);

	unreachable();
}