// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <hyp/switch.h>
#include <hyp/sysreg-sr.h>

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/thread_info.h>

/* Non-VHE specific context */
DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);

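/*
 * Set up the EL2 trap controls for running the guest: CPTR_EL2 traps
 * trace, SVE and AMU accesses, and FP/SIMD too unless the guest's FP
 * state is already loaded on the CPU.
 */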
static void __activate_traps(struct kvm_vcpu *vcpu)
{
	u64 val;

	___activate_traps(vcpu);
	__activate_traps_common(vcpu);

	val = CPTR_EL2_DEFAULT;
	val |= CPTR_EL2_TTA | CPTR_EL2_TZ | CPTR_EL2_TAM;
	if (!update_fp_enabled(vcpu)) {
		val |= CPTR_EL2_TFP;
		__activate_traps_fpsimd32(vcpu);
	}

	write_sysreg(val, cptr_el2);
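	/* Use this CPU's hyp vectors, which may be a hardened variant. */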
	write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;

		isb();
		/*
		 * At this stage, and thanks to the above isb(), S2 is
		 * configured and enabled. We can now restore the guest's S1
		 * configuration: SCTLR, and only then TCR.
		 */
		write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1),	SYS_SCTLR);
		isb();
		write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1),	SYS_TCR);
	}
}

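/*
 * Undo the guest trap configuration: put host values back into
 * MDCR_EL2, HCR_EL2 and CPTR_EL2, and reinstall the host vectors.
 */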
static void __deactivate_traps(struct kvm_vcpu *vcpu)
{
	extern char __kvm_hyp_host_vector[];
	u64 mdcr_el2;

	___deactivate_traps(vcpu);

	mdcr_el2 = read_sysreg(mdcr_el2);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		u64 val;

		/*
		 * Set the TCR and SCTLR registers in the exact opposite
		 * sequence as __activate_traps (first prevent walks,
		 * then force the MMU on). A generous sprinkling of isb()
		 * ensures that things happen in this exact order.
		 */
		val = read_sysreg_el1(SYS_TCR);
		write_sysreg_el1(val | TCR_EPD1_MASK | TCR_EPD0_MASK, SYS_TCR);
		isb();
		val = read_sysreg_el1(SYS_SCTLR);
		write_sysreg_el1(val | SCTLR_ELx_M, SYS_SCTLR);
		isb();
	}

	__deactivate_traps_common();

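	/*
	 * Rebuild MDCR_EL2 for the host: keep only the HPMN field and
	 * hand the SPE profiling buffer back to EL1 (E2PB).
	 */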
	mdcr_el2 &= MDCR_EL2_HPMN_MASK;
	mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;

	write_sysreg(mdcr_el2, mdcr_el2);
	write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
	write_sysreg(__kvm_hyp_host_vector, vbar_el2);
}

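/* A zero VTTBR_EL2 means VMID 0 and no stage-2 tables for the host. */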
static void __load_host_stage2(void)
{
	write_sysreg(0, vttbr_el2);
}

/* Save VGICv3 state on non-VHE systems */
static void __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
		__vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
	}
}

/* Restore VGICv3 state on non-VHE systems */
static void __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
		__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
	}
}

/**
 * Disable host events, enable guest events
 */
static bool __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_host_data *host;
	struct kvm_pmu_events *pmu;

	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
	pmu = &host->pmu_events;

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenclr_el0);

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenset_el0);

	return (pmu->events_host || pmu->events_guest);
}

/**
 * Disable guest events, enable host events
 */
static void __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_host_data *host;
	struct kvm_pmu_events *pmu;

	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
	pmu = &host->pmu_events;

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenclr_el0);

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenset_el0);
}

/* Switch to the guest for legacy non-VHE systems */
int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	bool pmu_switch_needed;
	u64 exit_code;

	/*
	 * Having IRQs masked via PMR when entering the guest means the GIC
	 * will not signal interrupts of lower priority to the CPU, and the
	 * only way to get out will be via guest exceptions.
	 * Naturally, we want to avoid this.
	 */
	if (system_uses_irq_prio_masking()) {
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
		pmr_sync();
	}

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	pmu_switch_needed = __pmu_switch_to_guest(host_ctxt);

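	/* Save the host's sysreg state before the guest's is loaded. */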
	__sysreg_save_state_nvhe(host_ctxt);

	/*
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 *
	 * Also, and in order to be able to deal with erratum #1319537 (A57)
	 * and #1319367 (A72), we must ensure that all VM-related sysreg are
	 * restored before we enable S2 translation.
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_state_nvhe(guest_ctxt);

	__load_guest_stage2(kern_hyp_va(vcpu->arch.hw_mmu));
	__activate_traps(vcpu);

	__hyp_vgic_restore_state(vcpu);
	__timer_enable_traps(vcpu);

	__debug_switch_to_guest(vcpu);

	do {
		/* Jump in the fire! */
		exit_code = __guest_enter(vcpu);

		/* And we're baaack! */
	} while (fixup_guest_exit(vcpu, &exit_code));

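	/* The guest has exited: save its context and undo the guest setup. */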
	__sysreg_save_state_nvhe(guest_ctxt);
	__sysreg32_save_state(vcpu);
	__timer_disable_traps(vcpu);
	__hyp_vgic_save_state(vcpu);

	__deactivate_traps(vcpu);
	__load_host_stage2();

	__sysreg_restore_state_nvhe(host_ctxt);

	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
		__fpsimd_save_fpexc32(vcpu);

	/*
	 * This must come after restoring the host sysregs, since a non-VHE
	 * system may enable SPE here and make use of the TTBRs.
	 */
	__debug_switch_to_host(vcpu);

	if (pmu_switch_needed)
		__pmu_switch_to_host(host_ctxt);

	/* Returning to host will clear PSR.I, remask PMR if needed */
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQOFF);

	host_ctxt->__hyp_running_vcpu = NULL;

	return exit_code;
}

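/*
 * Called on an unrecoverable error at EL2. If a guest was running,
 * restore enough host context to report the panic, then hand over to
 * __hyp_do_panic, which does not return.
 */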
void __noreturn hyp_panic(void)
{
	u64 spsr = read_sysreg_el2(SYS_SPSR);
	u64 elr = read_sysreg_el2(SYS_ELR);
	u64 par = read_sysreg(par_el1);
	bool restore_host = true;
	struct kvm_cpu_context *host_ctxt;
	struct kvm_vcpu *vcpu;

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	vcpu = host_ctxt->__hyp_running_vcpu;

	if (vcpu) {
		__timer_disable_traps(vcpu);
		__deactivate_traps(vcpu);
		__load_host_stage2();
		__sysreg_restore_state_nvhe(host_ctxt);
	}

	__hyp_do_panic(restore_host, spsr, elr, par);
	unreachable();
}

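/* Handle an exception taken at EL2 that the hyp code did not expect. */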
asmlinkage void kvm_unexpected_el2_exception(void)
{
	return __kvm_unexpected_el2_exception();
}