// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_mmu.h>
#include <kvm/arm_hypercalls.h>
#include <trace/events/kvm.h>

#include "trace.h"

/* Common signature for all guest-exit handlers; returns the handle_exit() convention. */
typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);

static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
21 22
	int ret;

23 24
	trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
		      kvm_vcpu_hvc_get_imm(vcpu));
25
	vcpu->stat.hvc_exit_stat++;
26

27
	ret = kvm_hvc_call_handler(vcpu);
28
	if (ret < 0) {
29
		vcpu_set_reg(vcpu, 0, ~0UL);
30
		return 1;
31
	}
32

33
	return ret;
34 35 36 37
}

/*
 * handle_smc - handle a guest SMC trap
 * @vcpu: the vcpu pointer
 * @run:  the kvm_run structure pointer (unused here)
 *
 * SMC is not supported for these guests: return ~0 in r0, skip the
 * instruction and resume the guest.
 */
static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	/*
	 * "If an SMC instruction executed at Non-secure EL1 is
	 * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
	 * Trap exception, not a Secure Monitor Call exception [...]"
	 *
	 * We need to advance the PC after the trap, as it would
	 * otherwise return to the same address...
	 */
	vcpu_set_reg(vcpu, 0, ~0UL);
	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
	return 1;
}

51
/**
52
 * kvm_handle_wfx - handle a WFI or WFE instructions trapped in guests
53 54 55
 * @vcpu:	the vcpu pointer
 * @run:	the kvm_run structure pointer
 *
56 57 58 59 60
 * WFE: Yield the CPU and come back to this vcpu when the scheduler
 * decides to.
 * WFI: Simply call kvm_vcpu_block(), which will halt execution of
 * world-switches and schedule other host processes until there is an
 * incoming IRQ or FIQ to the VM.
61
 */
62
static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
63
{
64 65
	if (kvm_vcpu_get_hsr(vcpu) & HSR_WFI_IS_WFE) {
		trace_kvm_wfx(*vcpu_pc(vcpu), true);
66
		vcpu->stat.wfe_exit_stat++;
67
		kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
68 69
	} else {
		trace_kvm_wfx(*vcpu_pc(vcpu), false);
70
		vcpu->stat.wfi_exit_stat++;
71
		kvm_vcpu_block(vcpu);
72
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
73
	}
74

75 76
	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));

77 78 79
	return 1;
}

80 81 82 83 84 85 86 87 88 89 90
static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	u32 hsr = kvm_vcpu_get_hsr(vcpu);

	kvm_pr_unimpl("Unknown exception class: hsr: %#08x\n",
		      hsr);

	kvm_inject_undefined(vcpu);
	return 1;
}

91
static exit_handle_fn arm_exit_handlers[] = {
92
	[0 ... HSR_EC_MAX]	= kvm_handle_unknown_ec,
93
	[HSR_EC_WFI]		= kvm_handle_wfx,
94 95
	[HSR_EC_CP15_32]	= kvm_handle_cp15_32,
	[HSR_EC_CP15_64]	= kvm_handle_cp15_64,
96
	[HSR_EC_CP14_MR]	= kvm_handle_cp14_32,
97
	[HSR_EC_CP14_LS]	= kvm_handle_cp14_load_store,
98
	[HSR_EC_CP14_64]	= kvm_handle_cp14_64,
99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122
	[HSR_EC_CP_0_13]	= kvm_handle_cp_0_13_access,
	[HSR_EC_CP10_ID]	= kvm_handle_cp10_id,
	[HSR_EC_HVC]		= handle_hvc,
	[HSR_EC_SMC]		= handle_smc,
	[HSR_EC_IABT]		= kvm_handle_guest_abort,
	[HSR_EC_DABT]		= kvm_handle_guest_abort,
};

/* Look up the handler for this trap's HSR exception class. */
static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
	return arm_exit_handlers[kvm_vcpu_trap_get_class(vcpu)];
}

/*
 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
 * proper exit to userspace.
 */
int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
		       int exception_index)
{
	exit_handle_fn exit_handler;

123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141
	if (ARM_ABORT_PENDING(exception_index)) {
		u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);

		/*
		 * HVC/SMC already have an adjusted PC, which we need
		 * to correct in order to return to after having
		 * injected the abort.
		 */
		if (hsr_ec == HSR_EC_HVC || hsr_ec == HSR_EC_SMC) {
			u32 adj =  kvm_vcpu_trap_il_is32bit(vcpu) ? 4 : 2;
			*vcpu_pc(vcpu) -= adj;
		}

		kvm_inject_vabt(vcpu);
		return 1;
	}

	exception_index = ARM_EXCEPTION_CODE(exception_index);

142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157
	switch (exception_index) {
	case ARM_EXCEPTION_IRQ:
		return 1;
	case ARM_EXCEPTION_HVC:
		/*
		 * See ARM ARM B1.14.1: "Hyp traps on instructions
		 * that fail their condition code check"
		 */
		if (!kvm_condition_valid(vcpu)) {
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			return 1;
		}

		exit_handler = kvm_get_exit_handler(vcpu);

		return exit_handler(vcpu, run);
158 159 160
	case ARM_EXCEPTION_DATA_ABORT:
		kvm_inject_vabt(vcpu);
		return 1;
161 162 163 164 165 166 167 168
	case ARM_EXCEPTION_HYP_GONE:
		/*
		 * HYP has been reset to the hyp-stub. This happens
		 * when a guest is pre-empted by kvm_reboot()'s
		 * shutdown call.
		 */
		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		return 0;
169 170 171 172 173 174 175
	default:
		kvm_pr_unimpl("Unsupported exception type: %d",
			      exception_index);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return 0;
	}
}