/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015-2018 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/mmu.h>

.macro save_caller_saved_regs_vect
	/* x0 and x1 were saved in the vector entry */
	stp	x2, x3,   [sp, #-16]!
	stp	x4, x5,   [sp, #-16]!
	stp	x6, x7,   [sp, #-16]!
	stp	x8, x9,   [sp, #-16]!
	stp	x10, x11, [sp, #-16]!
	stp	x12, x13, [sp, #-16]!
	stp	x14, x15, [sp, #-16]!
	stp	x16, x17, [sp, #-16]!
.endm

.macro restore_caller_saved_regs_vect
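	/* x0 and x1 were pushed by the vector preamble; pop them last */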
	ldp	x16, x17, [sp], #16
	ldp	x14, x15, [sp], #16
	ldp	x12, x13, [sp], #16
	ldp	x10, x11, [sp], #16
	ldp	x8, x9,   [sp], #16
	ldp	x6, x7,   [sp], #16
	ldp	x4, x5,   [sp], #16
	ldp	x2, x3,   [sp], #16
	ldp	x0, x1,   [sp], #16
.endm

	.text

.macro do_el2_call
	/*
	 * Shuffle the parameters before calling the function
	 * pointed to in x0. Assumes parameters in x[1,2,3].
	 */
	str	lr, [sp, #-16]!
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr
	ldr	lr, [sp], #16
.endm
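
/*
 * Rough sketch of the host-side caller (not part of this file): the
 * __kvm_call_hyp() trampoline passes a function pointer in x0 and up
 * to three arguments in x1-x3, then issues "hvc #0", which lands in
 * el1_sync below and ends up in do_el2_call:
 *
 *	mov	x0, <hyp function>
 *	mov	x1, <arg0>		// optional
 *	mov	x2, <arg1>		// optional
 *	mov	x3, <arg2>		// optional
 *	hvc	#0
 */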

el1_sync:				// Guest trapped into EL2

	mrs	x0, esr_el2
	lsr	x0, x0, #ESR_ELx_EC_SHIFT
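	/*
	 * If EC == HVC64 the ccmp forces Z set (#4); otherwise it compares
	 * EC against HVC32, so b.ne is only taken when this is not an HVC.
	 */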
	cmp	x0, #ESR_ELx_EC_HVC64
	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
	b.ne	el1_trap

#ifdef __KVM_NVHE_HYPERVISOR__
	mrs	x1, vttbr_el2		// If vttbr is valid, the guest
	cbnz	x1, el1_hvc_guest	// called HVC

	/* Here, we're pretty sure the host called HVC. */
	ldp	x0, x1, [sp], #16

	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	1f

	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there. Since we use kimage_voffset, do not use the
	 * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
	 * (by loading it from the constant pool).
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
	ldr	x5, =__kvm_handle_stub_hvc
	ldr_l	x6, kimage_voffset

	/* x5 = __pa(x5) */
	sub	x5, x5, x6
	br	x5

1:
	/*
	 * Perform the EL2 call
	 */
	kern_hyp_va	x0
	do_el2_call

	eret
	sb
#endif /* __KVM_NVHE_HYPERVISOR__ */

el1_hvc_guest:
	/*
	 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
	 * The workaround has already been applied on the host,
	 * so let's quickly get back to the guest. We don't bother
	 * restoring x1, as it can be clobbered anyway.
	 */
	ldr	x1, [sp]				// Guest's x0
	eor	w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
	cbz	w1, wa_epilogue

	/* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
			  ARM_SMCCC_ARCH_WORKAROUND_2)
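	/* The two EORs cancel: w1 is now guest x0 ^ ARM_SMCCC_ARCH_WORKAROUND_2 */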
	cbnz	w1, el1_trap

wa_epilogue:
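	/* Return SMCCC_RET_SUCCESS (0) and drop the x0/x1 saved on entry */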
	mov	x0, xzr
	add	sp, sp, #16
	eret
	sb

el1_trap:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_TRAP
	b	__guest_exit

el1_irq:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IRQ
	b	__guest_exit

el1_error:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_EL1_SERROR
	b	__guest_exit

el2_sync:
	/* Check for illegal exception return */
	mrs	x0, spsr_el2
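	/* SPSR_EL2.IL (bit 20) flags an illegal exception return */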
	tbnz	x0, #20, 1f

	save_caller_saved_regs_vect
	stp     x29, x30, [sp, #-16]!
	bl	kvm_unexpected_el2_exception
	ldp     x29, x30, [sp], #16
	restore_caller_saved_regs_vect

	eret

1:
	/* Let's attempt a recovery from the illegal exception return */
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IL
	b	__guest_exit


el2_error:
	save_caller_saved_regs_vect
	stp     x29, x30, [sp, #-16]!

	bl	kvm_unexpected_el2_exception

	ldp     x29, x30, [sp], #16
	restore_caller_saved_regs_vect

	eret
	sb

#ifdef __KVM_NVHE_HYPERVISOR__
SYM_FUNC_START(__hyp_do_panic)
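	/* Fake an exception return: run the host's panic() at EL1h with DAIF masked */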
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr
	eret
	sb
SYM_FUNC_END(__hyp_do_panic)
#endif

SYM_CODE_START(__hyp_panic)
	get_host_ctxt x0, x1
	b	hyp_panic
SYM_CODE_END(__hyp_panic)

.macro invalid_vector	label, target = __hyp_panic
	.align	2
SYM_CODE_START(\label)
	b \target
SYM_CODE_END(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid
	invalid_vector	el2t_irq_invalid
	invalid_vector	el2t_fiq_invalid
	invalid_vector	el2t_error_invalid
	invalid_vector	el2h_sync_invalid
	invalid_vector	el2h_irq_invalid
	invalid_vector	el2h_fiq_invalid
	invalid_vector	el1_fiq_invalid

	.ltorg

	.align 11

.macro check_preamble_length start, end
/* kvm_patch_vector_branch() generates code that jumps over the preamble. */
.if ((\end-\start) != KVM_VECTOR_PREAMBLE)
	.error "KVM vector preamble length mismatch"
.endif
.endm

.macro valid_vect target
	.align 7
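	/* 661..662 below is the vector preamble: an ESB plus the push of x0/x1 */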
661:
	esb
	stp	x0, x1, [sp, #-16]!
662:
	b	\target

check_preamble_length 661b, 662b
.endm

.macro invalid_vect target
	.align 7
661:
	b	\target
	nop
662:
	ldp	x0, x1, [sp], #16
	b	\target

check_preamble_length 661b, 662b
.endm

SYM_CODE_START(__kvm_hyp_vector)
	invalid_vect	el2t_sync_invalid	// Synchronous EL2t
	invalid_vect	el2t_irq_invalid	// IRQ EL2t
	invalid_vect	el2t_fiq_invalid	// FIQ EL2t
	invalid_vect	el2t_error_invalid	// Error EL2t

	valid_vect	el2_sync		// Synchronous EL2h
	invalid_vect	el2h_irq_invalid	// IRQ EL2h
	invalid_vect	el2h_fiq_invalid	// FIQ EL2h
	valid_vect	el2_error		// Error EL2h

	valid_vect	el1_sync		// Synchronous 64-bit EL1
	valid_vect	el1_irq			// IRQ 64-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 64-bit EL1
	valid_vect	el1_error		// Error 64-bit EL1

	valid_vect	el1_sync		// Synchronous 32-bit EL1
	valid_vect	el1_irq			// IRQ 32-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 32-bit EL1
	valid_vect	el1_error		// Error 32-bit EL1
SYM_CODE_END(__kvm_hyp_vector)

.macro hyp_ventry
	.align 7
1:	esb
	.rept 26
	nop
	.endr
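/*
 * The ESB, the 26 NOPs and the 5-instruction alternative below add up
 * to 32 instructions, i.e. exactly one 128-byte vector slot.
 */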
/*
 * The default sequence is to directly branch to the KVM vectors,
 * using the computed offset. This applies for VHE as well as
 * !ARM64_HARDEN_EL2_VECTORS. The first vector must always run the preamble.
 *
 * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
 * with:
 *
 * stp	x0, x1, [sp, #-16]!
 * movz	x0, #(addr & 0xffff)
 * movk	x0, #((addr >> 16) & 0xffff), lsl #16
 * movk	x0, #((addr >> 32) & 0xffff), lsl #32
 * br	x0
 *
 * Where:
 * addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + KVM_VECTOR_PREAMBLE.
 * See kvm_patch_vector_branch for details.
 */
alternative_cb	kvm_patch_vector_branch
	stp	x0, x1, [sp, #-16]!
	b	__kvm_hyp_vector + (1b - 0b + KVM_VECTOR_PREAMBLE)
	nop
	nop
	nop
alternative_cb_end
.endm

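/*
 * Each invocation emits 16 slots of 128 bytes: one full 2K vector table,
 * which the .org directive below enforces at build time.
 */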
.macro generate_vectors
0:
	.rept 16
	hyp_ventry
	.endr
	.org 0b + SZ_2K		// Safety measure
.endm

	.align	11
SYM_CODE_START(__bp_harden_hyp_vecs)
	.rept BP_HARDEN_EL2_SLOTS
	generate_vectors
	.endr
1:	.org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ
	.org 1b
SYM_CODE_END(__bp_harden_hyp_vecs)