/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/linkage.h>
#include <linux/const.h>
#include <asm/unified.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/vfpmacros.h>
#include "interrupts_head.S"

	.text

__kvm_hyp_code_start:
	.globl __kvm_hyp_code_start

/********************************************************************
 * Flush per-VMID TLBs
 *
 * void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 *
 * We rely on the hardware to broadcast the TLB invalidation to all CPUs
 * inside the inner-shareable domain (which is the case for all v7
 * implementations).  If we come across a non-IS SMP implementation, we'll
 * have to use an IPI-based mechanism. Until then, we stick to the simple
 * hardware assisted version.
 *
 * As v7 does not support flushing per IPA, just nuke the whole TLB
 * instead, ignoring the ipa value.
 */
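
/*
 * Illustrative host-side usage (a hedged sketch, not part of this file):
 * this entry point is reached through kvm_call_hyp(), which traps to Hyp
 * mode with HVC #0 and branches here with the arguments shifted down into
 * r0-r2, e.g. the wrapper in arch/arm/kvm/mmu.c:
 *
 *	static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 *	{
 *		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 *	}
 *
 * so r0 carries the struct kvm pointer on entry, and the ipa value is
 * ignored here for the reason given above.
 */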
ENTRY(__kvm_tlb_flush_vmid_ipa)
	push	{r2, r3}

	dsb	ishst
	add	r0, r0, #KVM_VTTBR
	ldrd	r2, r3, [r0]
	mcrr	p15, 6, rr_lo_hi(r2, r3), c2	@ Write VTTBR
	isb
	mcr     p15, 0, r0, c8, c3, 0	@ TLBIALLIS (rt ignored)
	dsb	ish
	isb
	mov	r2, #0
	mov	r3, #0
	mcrr	p15, 6, r2, r3, c2	@ Back to VMID #0
	isb				@ Not necessary if followed by eret

	pop	{r2, r3}
	bx	lr
ENDPROC(__kvm_tlb_flush_vmid_ipa)

/**
 * void __kvm_tlb_flush_vmid(struct kvm *kvm) - Flush per-VMID TLBs
 *
 * Reuses __kvm_tlb_flush_vmid_ipa() for ARMv7, without passing the address
 * parameter.
 */
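
/*
 * Host-side sketch (illustrative): a full flush of one VM's TLB entries,
 * with no particular address of interest, would be requested as:
 *
 *	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
 */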

ENTRY(__kvm_tlb_flush_vmid)
	b	__kvm_tlb_flush_vmid_ipa
ENDPROC(__kvm_tlb_flush_vmid)

/********************************************************************
 * Flush TLBs and instruction caches of all CPUs inside the inner-shareable
 * domain, for all VMIDs
 *
 * void __kvm_flush_vm_context(void);
 */
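
/*
 * Illustrative call site (a sketch of the typical use): the host runs this
 * when the VMID generation rolls over, so that stale entries for all old
 * VMIDs are gone before the numbers are handed out again:
 *
 *	kvm_call_hyp(__kvm_flush_vm_context);
 */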
ENTRY(__kvm_flush_vm_context)
	mov	r0, #0			@ rn parameter for the flushes below is SBZ

	/* Invalidate NS Non-Hyp TLB Inner Shareable (TLBIALLNSNHIS) */
	mcr     p15, 4, r0, c8, c3, 4
	/* Invalidate instruction caches Inner Shareable (ICIALLUIS) */
	mcr     p15, 0, r0, c7, c1, 0
	dsb	ish
	isb				@ Not necessary if followed by eret

	bx	lr
ENDPROC(__kvm_flush_vm_context)


/********************************************************************
 *  Hypervisor world-switch code
 *
 *
 * int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 */
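
/*
 * Illustrative entry path (a hedged sketch): the host's vcpu run loop
 * enters the guest through kvm_call_hyp() and receives the exception code
 * that __kvm_vcpu_return below leaves in r0:
 *
 *	ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 */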
ENTRY(__kvm_vcpu_run)
	@ Save the vcpu pointer
	mcr	p15, 4, vcpu, c13, c0, 2	@ HTPIDR

	save_host_regs

	restore_vgic_state
	restore_timer_state

	@ Store hardware CP15 state and load guest state
	read_cp15_state store_to_vcpu = 0
	write_cp15_state read_from_vcpu = 1

	@ If the host kernel has not been configured with VFPv3 support,
	@ then it is safer to deny the guest the use of it as well.
#ifdef CONFIG_VFPv3
	@ Set FPEXC_EN so the guest doesn't trap floating point instructions
	VFPFMRX r2, FPEXC		@ VMRS
	push	{r2}
	orr	r2, r2, #FPEXC_EN
	VFPFMXR FPEXC, r2		@ VMSR
#endif

	@ Configure Hyp-role
	configure_hyp_role vmentry

	@ Trap coprocessor CRx accesses
	set_hstr vmentry
	set_hcptr vmentry, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
	set_hdcr vmentry

	@ Write configured ID register into MIDR alias
	ldr	r1, [vcpu, #VCPU_MIDR]
	mcr	p15, 4, r1, c0, c0, 0

	@ Write guest view of MPIDR into VMPIDR
	ldr	r1, [vcpu, #CP15_OFFSET(c0_MPIDR)]
	mcr	p15, 4, r1, c0, c0, 5

	@ Set up guest memory translation
	ldr	r1, [vcpu, #VCPU_KVM]
	add	r1, r1, #KVM_VTTBR
	ldrd	r2, r3, [r1]
	mcrr	p15, 6, rr_lo_hi(r2, r3), c2	@ Write VTTBR

	@ We're all done, just restore the GPRs and go to the guest
	restore_guest_regs
	clrex				@ Clear exclusive monitor
	eret

__kvm_vcpu_return:
	/*
	 * return convention:
	 * guest r0, r1, r2 saved on the stack
	 * r0: vcpu pointer
	 * r1: exception code
	 */
	save_guest_regs

	@ Set VMID == 0
	mov	r2, #0
	mov	r3, #0
	mcrr	p15, 6, r2, r3, c2	@ Write VTTBR

	@ Don't trap coprocessor accesses for host kernel
	set_hstr vmexit
	set_hdcr vmexit
	set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)), after_vfp_restore

#ifdef CONFIG_VFPv3
	@ Switch VFP/NEON hardware state to the host's
	add	r7, vcpu, #VCPU_VFP_GUEST
	store_vfp_state r7
	add	r7, vcpu, #VCPU_VFP_HOST
	ldr	r7, [r7]
	restore_vfp_state r7

after_vfp_restore:
	@ Restore FPEXC_EN which we clobbered on entry
	pop	{r2}
	VFPFMXR FPEXC, r2
#else
after_vfp_restore:
#endif

	@ Reset Hyp-role
	configure_hyp_role vmexit

	@ Let host read hardware MIDR
	mrc	p15, 0, r2, c0, c0, 0
	mcr	p15, 4, r2, c0, c0, 0

	@ Back to hardware MPIDR
	mrc	p15, 0, r2, c0, c0, 5
	mcr	p15, 4, r2, c0, c0, 5

	@ Store guest CP15 state and restore host state
	read_cp15_state store_to_vcpu = 1
	write_cp15_state read_from_vcpu = 0

	save_timer_state
	save_vgic_state

	restore_host_regs
	clrex				@ Clear exclusive monitor
#ifndef CONFIG_CPU_ENDIAN_BE8
	mov	r0, r1			@ Return the return code
	mov	r1, #0			@ Clear upper bits in return value
#else
	@ r1 already has return code
	mov	r0, #0			@ Clear upper bits in return value
#endif /* CONFIG_CPU_ENDIAN_BE8 */
	bx	lr			@ return to IOCTL

/********************************************************************
 *  Call function in Hyp mode
 *
 *
 * u64 kvm_call_hyp(void *hypfn, ...);
 *
 * This is not really a variadic function in the classic C sense, and care
 * must be taken when calling this to ensure parameters are passed in
 * registers only, since the stack will change between the caller and the
 * callee.
 *
 * Call the function with the first argument containing a pointer to the
 * function you wish to call in Hyp mode, and subsequent arguments will be
 * passed in r0, r1, and r2 (a maximum of 3 arguments in addition to the
 * function pointer can be passed).  The function being called must be mapped
 * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c).  Return values are
 * passed in r0 and r1.
 *
 * A function pointer with a value of 0xffffffff has a special meaning,
 * and is used to implement __hyp_get_vectors in the same way as in
 * arch/arm/kernel/hyp_stub.S.
 *
 * The calling convention follows the standard AAPCS:
 *   r0 - r3: caller save
 *   r12:     caller save
 *   rest:    callee save
 */
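
/*
 * Usage sketch (illustrative; the prototype above is the one declared in
 * asm/kvm_host.h):
 *
 *	kvm_call_hyp(__kvm_flush_vm_context);
 *	ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 *
 * Passing the magic pointer 0xffffffff instead of a real function returns
 * the HVBAR value, as described above.
 */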
ENTRY(kvm_call_hyp)
	hvc	#0
	bx	lr

/********************************************************************
 * Hypervisor exception vector and handlers
 *
 *
 * The KVM/ARM Hypervisor ABI is defined as follows:
 *
 * Entry to Hyp mode from the host kernel will happen _only_ when an HVC
 * instruction is issued since all traps are disabled when running the host
 * kernel as per the Hyp-mode initialization at boot time.
 *
 * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc
 * below) when the HVC instruction is called from SVC mode (i.e. a guest or the
 * host kernel) and they cause a trap to the vector page + offset 0x8 when HVC
 * instructions are called from within Hyp-mode.
 *
 * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode):
 *    Switching to Hyp mode is done through a simple HVC #0 instruction. The
 *    exception vector code will check that the HVC comes from VMID==0 and if
 *    so will push the necessary state (SPSR, lr_usr) on the Hyp stack.
 *    - r0 contains a pointer to a HYP function
 *    - r1, r2, and r3 contain arguments to the above function.
 *    - The HYP function will be called with its arguments in r0, r1 and r2.
 *    On HYP function return, we return directly to SVC.
 *
 * Note that the above is used to execute code in Hyp-mode from a host-kernel
 * point of view, and is a different concept from performing a world-switch and
 * executing guest code in SVC mode (with a VMID != 0).
 */
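
/*
 * Illustrative SVC-side sequence (a sketch of what the ABI above implies;
 * it is essentially what kvm_call_hyp() compiles down to, with any
 * arguments already loaded into r1-r3):
 *
 *	ldr	r0, =__kvm_flush_vm_context	@ HYP function pointer in r0
 *	hvc	#0				@ trap to hyp_hvc (offset 0x14)
 *	bx	lr				@ back in SVC, result in r0/r1
 */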

/* Handle undef, svc, pabt, or dabt by crashing with a user notice */
.macro bad_exception exception_code, panic_str
	push	{r0-r2}
	mrrc	p15, 6, r0, r1, c2	@ Read VTTBR
	lsr	r1, r1, #16
	ands	r1, r1, #0xff
	beq	99f

	load_vcpu			@ Load VCPU pointer
	.if \exception_code == ARM_EXCEPTION_DATA_ABORT
	mrc	p15, 4, r2, c5, c2, 0	@ HSR
	mrc	p15, 4, r1, c6, c0, 0	@ HDFAR
	str	r2, [vcpu, #VCPU_HSR]
	str	r1, [vcpu, #VCPU_HxFAR]
	.endif
	.if \exception_code == ARM_EXCEPTION_PREF_ABORT
	mrc	p15, 4, r2, c5, c2, 0	@ HSR
	mrc	p15, 4, r1, c6, c0, 2	@ HIFAR
	str	r2, [vcpu, #VCPU_HSR]
	str	r1, [vcpu, #VCPU_HxFAR]
	.endif
	mov	r1, #\exception_code
	b	__kvm_vcpu_return

	@ We were in the host already. Let's craft a panicking return to SVC.
99:	mrs	r2, cpsr
	bic	r2, r2, #MODE_MASK
	orr	r2, r2, #SVC_MODE
THUMB(	orr	r2, r2, #PSR_T_BIT	)
	msr	spsr_cxsf, r2
	mrs	r1, ELR_hyp
	ldr	r2, =panic
	msr	ELR_hyp, r2
	ldr	r0, =\panic_str
	clrex				@ Clear exclusive monitor
	eret
.endm

	.text

	.align 5
__kvm_hyp_vector:
	.globl __kvm_hyp_vector

	@ Hyp-mode exception vector
	W(b)	hyp_reset
	W(b)	hyp_undef
	W(b)	hyp_svc
	W(b)	hyp_pabt
	W(b)	hyp_dabt
	W(b)	hyp_hvc
	W(b)	hyp_irq
	W(b)	hyp_fiq

	.align
hyp_reset:
	b	hyp_reset

	.align
hyp_undef:
	bad_exception ARM_EXCEPTION_UNDEFINED, und_die_str

	.align
hyp_svc:
	bad_exception ARM_EXCEPTION_HVC, svc_die_str

	.align
hyp_pabt:
	bad_exception ARM_EXCEPTION_PREF_ABORT, pabt_die_str

	.align
hyp_dabt:
	bad_exception ARM_EXCEPTION_DATA_ABORT, dabt_die_str

	.align
hyp_hvc:
	/*
	 * Getting here is either because of a trap from a guest or from calling
	 * HVC from the host kernel, which means "switch to Hyp mode".
	 */
	push	{r0, r1, r2}

	@ Check syndrome register
	mrc	p15, 4, r1, c5, c2, 0	@ HSR
	lsr	r0, r1, #HSR_EC_SHIFT
	cmp	r0, #HSR_EC_HVC
	bne	guest_trap		@ Not HVC instr.

	/*
	 * Let's check if the HVC came from VMID 0 and allow simple
	 * switch to Hyp mode
	 */
	mrrc    p15, 6, r0, r2, c2
	lsr     r2, r2, #16
	and     r2, r2, #0xff
	cmp     r2, #0
	bne	guest_trap		@ Guest called HVC

	/*
	 * Getting here means the host called HVC; shift the parameters and
	 * branch to the Hyp function.
	 */
	pop	{r0, r1, r2}

	/* Check for __hyp_get_vectors */
	cmp	r0, #-1
	mrceq	p15, 4, r0, c12, c0, 0	@ get HVBAR
	beq	1f

	push	{lr}
	mrs	lr, SPSR
	push	{lr}

	mov	lr, r0
	mov	r0, r1
	mov	r1, r2
	mov	r2, r3

THUMB(	orr	lr, #1)
	blx	lr			@ Call the HYP function

	pop	{lr}
	msr	SPSR_csxf, lr
	pop	{lr}
1:	eret

guest_trap:
	load_vcpu			@ Load VCPU pointer to r0
	str	r1, [vcpu, #VCPU_HSR]

	@ Check if we need the fault information
	lsr	r1, r1, #HSR_EC_SHIFT
#ifdef CONFIG_VFPv3
	cmp	r1, #HSR_EC_CP_0_13
	beq	switch_to_guest_vfp
#endif
	cmp	r1, #HSR_EC_IABT
	mrceq	p15, 4, r2, c6, c0, 2	@ HIFAR
	beq	2f
	cmp	r1, #HSR_EC_DABT
	bne	1f
	mrc	p15, 4, r2, c6, c0, 0	@ HDFAR

2:	str	r2, [vcpu, #VCPU_HxFAR]

	/*
	 * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode:
	 *
	 * Abort on the stage 2 translation for a memory access from a
	 * Non-secure PL1 or PL0 mode:
	 *
	 * For any Access flag fault or Translation fault, and also for any
	 * Permission fault on the stage 2 translation of a memory access
	 * made as part of a translation table walk for a stage 1 translation,
	 * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR
	 * is UNKNOWN.
	 */

	/* Check for permission fault, and S1PTW */
	mrc	p15, 4, r1, c5, c2, 0	@ HSR
	and	r0, r1, #HSR_FSC_TYPE
	cmp	r0, #FSC_PERM
	tsteq	r1, #(1 << 7)		@ S1PTW
	mrcne	p15, 4, r2, c6, c0, 4	@ HPFAR
	bne	3f

	/* Preserve PAR */
	mrrc	p15, 0, r0, r1, c7	@ PAR
	push	{r0, r1}

	/* Resolve IPA using the xFAR */
	mcr	p15, 0, r2, c7, c8, 0	@ ATS1CPR
	isb
	mrrc	p15, 0, r0, r1, c7	@ PAR
	tst	r0, #1
	bne	4f			@ Failed translation
	ubfx	r2, r0, #12, #20	@ r2 = PA[31:12] from PAR low word
	lsl	r2, r2, #4		@ shift into HPFAR layout, bits [23:4]
	orr	r2, r2, r1, lsl #24	@ merge PA[39:32] into bits [31:24]

	/* Restore PAR */
	pop	{r0, r1}
	mcrr	p15, 0, r0, r1, c7	@ PAR

3:	load_vcpu			@ Load VCPU pointer to r0
	str	r2, [r0, #VCPU_HPFAR]

1:	mov	r1, #ARM_EXCEPTION_HVC
	b	__kvm_vcpu_return

4:	pop	{r0, r1}		@ Failed translation, return to guest
	mcrr	p15, 0, r0, r1, c7	@ PAR
	clrex
	pop	{r0, r1, r2}
	eret

/*
 * If VFPv3 support is not available, then we will not switch the VFP
 * registers; however, cp10 and cp11 accesses will still trap and fall back
 * to the regular coprocessor emulation code, which currently will
 * inject an undefined exception to the guest.
 */
#ifdef CONFIG_VFPv3
switch_to_guest_vfp:
	push	{r3-r7}

	@ NEON/VFP used.  Turn on VFP access.
	set_hcptr vmtrap, (HCPTR_TCP(10) | HCPTR_TCP(11))

	@ Switch VFP/NEON hardware state to the guest's
	add	r7, r0, #VCPU_VFP_HOST
	ldr	r7, [r7]
	store_vfp_state r7
	add	r7, r0, #VCPU_VFP_GUEST
	restore_vfp_state r7

	pop	{r3-r7}
	pop	{r0-r2}
	clrex
	eret
#endif

	.align
hyp_irq:
	push	{r0, r1, r2}
	mov	r1, #ARM_EXCEPTION_IRQ
	load_vcpu			@ Load VCPU pointer to r0
	b	__kvm_vcpu_return

	.align
hyp_fiq:
	b	hyp_fiq

	.ltorg

__kvm_hyp_code_end:
	.globl	__kvm_hyp_code_end

	.section ".rodata"

und_die_str:
	.ascii	"unexpected undefined exception in Hyp mode at: %#08x\n"
pabt_die_str:
	.ascii	"unexpected prefetch abort in Hyp mode at: %#08x\n"
dabt_die_str:
	.ascii	"unexpected data abort in Hyp mode at: %#08x\n"
svc_die_str:
	.ascii	"unexpected HVC/SVC trap in Hyp mode at: %#08x\n"