/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/tm.h>
#include <asm/opal.h>
#include <asm/xive-regs.h>
#include <asm/thread_info.h>

/* Sign-extend HDEC if not on POWER9 */
#define EXTEND_HDEC(reg)			\
BEGIN_FTR_SECTION;				\
	extsw	reg, reg;			\
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2

/* Stack frame offsets for kvmppc_hv_entry */
#define SFS			160
#define STACK_SLOT_TRAP		(SFS-4)
#define STACK_SLOT_TID		(SFS-16)
#define STACK_SLOT_PSSCR	(SFS-24)
#define STACK_SLOT_PID		(SFS-32)
#define STACK_SLOT_IAMR		(SFS-40)
#define STACK_SLOT_CIABR	(SFS-48)
#define STACK_SLOT_DAWR		(SFS-56)
#define STACK_SLOT_DAWRX	(SFS-64)
#define STACK_SLOT_HFSCR	(SFS-72)

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
	mfmsr	r10
	std	r10, HSTATE_HOST_MSR(r13)
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
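	/*
	 * The rfid below hands control to kvmppc_call_hv_entry with the
	 * MMU off (IR/DR clear in SRR1).  RI is cleared first because
	 * SRR0/1 are live from here to the rfid, so an interrupt taken
	 * in between would be unrecoverable.
	 */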
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI_TO_KERNEL

kvmppc_call_hv_entry:
BEGIN_FTR_SECTION
	/* On P9, do LPCR setting, if necessary */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	46f
	lwz	r4, KVM_SPLIT_DO_SET(r3)
	cmpwi	r4, 0
	beq	46f
	bl	kvmhv_p9_set_lpcr
	nop
46:
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

BEGIN_FTR_SECTION
	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	lbz	r4, PACA_PMCINUSE(r13) /* is the host using the PMU? */
	cmpwi	r4, 0
	beq	23f			/* skip if not */
BEGIN_FTR_SECTION
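	/*
	 * PMAO set with PMAO_SYNC clear means a PMU alert may have been
	 * lost while the counters were frozen; kvmppc_fix_pmao deals
	 * with it (POWER8 PMAO bug workaround).
	 */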
	ld	r3, HSTATE_MMCR0(r13)
	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r4, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC1(r13)
	lwz	r4, HSTATE_PMC2(r13)
	lwz	r5, HSTATE_PMC3(r13)
	lwz	r6, HSTATE_PMC4(r13)
	lwz	r8, HSTATE_PMC5(r13)
	lwz	r9, HSTATE_PMC6(r13)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, HSTATE_MMCR0(r13)
	ld	r4, HSTATE_MMCR1(r13)
	ld	r5, HSTATE_MMCRA(r13)
	ld	r6, HSTATE_SIAR(r13)
	ld	r7, HSTATE_SDAR(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_SIAR, r6
	mtspr	SPRN_SDAR, r7
BEGIN_FTR_SECTION
	ld	r8, HSTATE_MMCR2(r13)
	ld	r9, HSTATE_SIER(r13)
	mtspr	SPRN_MMCR2, r8
	mtspr	SPRN_SIER, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync
23:

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/* hwthread_req may have got set by cede or no vcpu, so clear it */
	li	r0, 0
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/*
	 * For external interrupts we need to call the Linux
	 * handler to process the interrupt. We do that by jumping
	 * to absolute address 0x500 for external interrupts.
	 * The [h]rfid at the end of the handler will return to
	 * the book3s_hv_interrupts.S code. For other interrupts
	 * we do the rfid to get back to the book3s_hv_interrupts.S
	 * code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	ld	r7, HSTATE_HOST_MSR(r13)

	/* Return the trap number on this thread as the return value */
	mr	r3, r12

	/*
	 * If we came back from the guest via a relocation-on interrupt,
	 * we will be in virtual mode at this point, which makes it a
	 * little easier to get back to the caller.
	 */
	mfmsr	r0
	andi.	r0, r0, MSR_IR		/* in real mode? */
	bne	.Lvirt_return

	/* RFI into the highmem handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	RFI_TO_KERNEL

	/* Virtual-mode return */
.Lvirt_return:
	mtlr	r8
	blr

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
	/* HDEC may be larger than DEC for arch >= v3.00, but since the */
	/* HDEC value came from DEC in the first place, it will fit */
	mfspr	r3, SPRN_HDEC
	mtspr	SPRN_DEC, r3
	/*
	 * Make sure the primary has finished the MMU switch.
	 * We should never get here on a secondary thread, but
	 * check it for robustness' sake.
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)
65:	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	65b
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
1:	lwarx	r3, 0, r6
	or	r3, r3, r0
	stwcx.	r3, 0, r6
	bne	1b
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r12, 0
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	cmpwi	r7, 0x100
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)

	li	r3, 0		/* Don't wake on privileged (OS) doorbell */
	b	kvm_do_nap

/*
 * kvm_novcpu_wakeup
 *	Entered from kvm_start_guest if kvm_hstate.napping is set
 *	to NAPPING_NOVCPU
 *		r2 = kernel TOC
 *		r13 = paca
 */
kvm_novcpu_wakeup:
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called
	 * a C routine in kvmppc_check_wake_reason.
	 *	r5 = VCORE
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	kvm_novcpu_exit

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
4:	lwarx	r7, 0, r6
	andc	r7, r7, r0
	stwcx.	r7, 0, r6
	bne	4b

	/* See if the wake reason means we need to exit */
	cmpdi	r3, 0
	bge	kvm_novcpu_exit

	/* See if our timeslice has expired (HDEC is negative) */
	mfspr	r0, SPRN_HDEC
	EXTEND_HDEC(r0)
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
	cmpdi	r0, 0
	blt	kvm_novcpu_exit

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
#endif
	b	kvmppc_got_guest

kvm_novcpu_exit:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	13f
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
13:	mr	r3, r12
	stw	r12, STACK_SLOT_TRAP(r1)
	bl	kvmhv_commence_exit
	nop
	b	kvmhv_switch_to_host

/*
 * We come in here when wakened from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 * r3 contains the SRR1 wakeup value, SRR1 is trashed.
 */
	.globl	kvm_start_guest
kvm_start_guest:
	/* Set runlatch bit the minute you wake up from nap */
	mfspr	r0, SPRN_CTRLF
	ori	r0, r0, 1
	mtspr	SPRN_CTRLT, r0

	/*
	 * Could avoid this and pass it through in r3. For now,
	 * code expects it to be in SRR1.
	 */
	mtspr	SPRN_SRR1,r3

	ld	r2,PACATOC(r13)

	li	r0,0
	stb	r0,PACA_FTRACE_ENABLED(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	beq	kvm_end_cede
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup

	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	/*
	 * kvmppc_check_wake_reason could invoke a C routine, but we
	 * have no volatile registers to restore when we return.
	 */

	cmpdi	r3, 0
	bge	kvm_no_guest

	/* get vcore pointer, NULL if we have nothing to run */
	ld	r5,HSTATE_KVM_VCORE(r13)
	cmpdi	r5,0
	/* if we have no vcore to run, go back to sleep */
	beq	kvm_no_guest

kvm_secondary_got_guest:

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR_DEFAULT(r13)
	std	r6, HSTATE_DSCR(r13)

	/* On thread 0 of a subcore, set HDEC to max */
	lbz	r4, HSTATE_PTID(r13)
	cmpwi	r4, 0
	bne	63f
	LOAD_REG_ADDR(r6, decrementer_max)
	ld	r6, 0(r6)
	mtspr	SPRN_HDEC, r6
	/* and set per-LPAR registers, if doing dynamic micro-threading */
	ld	r6, HSTATE_SPLIT_MODE(r13)
	cmpdi	r6, 0
	beq	63f
BEGIN_FTR_SECTION
	ld	r0, KVM_SPLIT_RPR(r6)
	mtspr	SPRN_RPR, r0
	ld	r0, KVM_SPLIT_PMMAR(r6)
	mtspr	SPRN_PMMAR, r0
	ld	r0, KVM_SPLIT_LDBAR(r6)
	mtspr	SPRN_LDBAR, r0
	isync
FTR_SECTION_ELSE
	/* On P9 we use the split_info for coordinating LPCR changes */
	lwz	r4, KVM_SPLIT_DO_SET(r6)
	cmpwi	r4, 0
	beq	1f
	mr	r3, r6
	bl	kvmhv_p9_set_lpcr
	nop
1:
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
63:
	/* Order load of vcpu after load of vcore */
	lwsync
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu and vcore pointers so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Once we clear HSTATE_KVM_VCORE(r13), the code in
	 * kvmppc_run_core() is going to assume that all our vcpu
	 * state is visible in memory.  This lwsync makes sure
	 * that that is true.
	 */
	lwsync
	std	r0, HSTATE_KVM_VCORE(r13)

	/*
	 * All secondaries exiting guest will fall through this path.
	 * Before proceeding, just check for HMI interrupt and
	 * invoke opal hmi handler. By now we are sure that the
	 * primary thread on this core/subcore has already made partition
	 * switch/TB resync and we are good to call opal hmi handler.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	kvm_no_guest

	li	r3,0			/* NULL argument */
	bl	hmi_exception_realmode
/*
 * At this point we have finished executing in the guest.
 * We need to wait for hwthread_req to become zero, since
 * we may not turn on the MMU while hwthread_req is non-zero.
 * While waiting we also need to check if we get given a vcpu to run.
 */
kvm_no_guest:
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	53f
	HMT_MEDIUM
	li	r0, KVM_HWTHREAD_IN_KERNEL
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	sync
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	54f
/*
 * We jump to pnv_wakeup_loss, which will return to the caller
 * of power7_nap in the powernv cpu offline loop.  The value we
 * put in r3 becomes the return value for power7_nap. pnv_wakeup_loss
 * requires SRR1 in r12.
 */
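	/* wake on external interrupts (PECE0) but not on decrementer (PECE1) */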
	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	li	r3, 0
	mfspr	r12,SPRN_SRR1
	b	pnv_wakeup_loss

53:	HMT_LOW
	ld	r5, HSTATE_KVM_VCORE(r13)
	cmpdi	r5, 0
	bne	60f
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	kvm_no_guest
	lwz	r0, KVM_SPLIT_DO_SET(r3)
	cmpwi	r0, 0
	bne	kvmhv_do_set
	lwz	r0, KVM_SPLIT_DO_RESTORE(r3)
	cmpwi	r0, 0
	bne	kvmhv_do_restore
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	kvm_no_guest
	HMT_MEDIUM
	b	kvm_unsplit_nap
60:	HMT_MEDIUM
	b	kvm_secondary_got_guest

54:	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	b	kvm_no_guest

kvmhv_do_set:
	/* Set LPCR, LPIDR etc. on P9 */
	HMT_MEDIUM
	bl	kvmhv_p9_set_lpcr
	nop
	b	kvm_no_guest

kvmhv_do_restore:
	HMT_MEDIUM
	bl	kvmhv_p9_restore_lpcr
	nop
	b	kvm_no_guest

/*
 * Here the primary thread is trying to return the core to
 * whole-core mode, so we need to nap.
 */
kvm_unsplit_nap:
	/*
	 * When secondaries are napping in kvm_unsplit_nap() with
	 * hwthread_req = 1, an HMI goes ignored even though the subcores
	 * have already exited the guest. Hence HMI keeps waking up
	 * secondaries from nap in a loop and secondaries always go back
	 * to nap since no vcore is assigned to them. This makes it
	 * impossible for the primary thread to get hold of the secondary
	 * threads, resulting in a soft lockup in the KVM path.
	 *
	 * Let us check if HMI is pending and handle it before we go to nap.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	55f
	li	r3, 0			/* NULL argument */
	bl	hmi_exception_realmode
55:
	/*
	 * Ensure that secondary doesn't nap when it has
	 * its vcore pointer set.
	 */
	sync		/* matches smp_mb() before setting split_info.do_nap */
	ld	r0, HSTATE_KVM_VCORE(r13)
	cmpdi	r0, 0
	bne	kvm_no_guest
	/* clear any pending message */
BEGIN_FTR_SECTION
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Set kvm_split_mode.napped[tid] = 1 */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	li	r0, 1
	lbz	r4, HSTATE_TID(r13)
	addi	r4, r4, KVM_SPLIT_NAPPED
	stbx	r0, r3, r4
	/* Check the do_nap flag again after setting napped[] */
	sync
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	57f
	li	r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
	mfspr	r5, SPRN_LPCR
	rlwimi	r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
	b	kvm_nap_sequence

57:	li	r0, 0
	stbx	r0, r3, r4
	b	kvm_no_guest

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer (or NULL)
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = TOC
	 * all other volatile GPRS = free
	 * Does not preserve non-volatile GPRs or CR fields
	 */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -SFS(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Store initial timestamp */
	cmpdi	r4, 0
	beq	1f
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
1:
#endif

	/* Use cr7 as an indication of radix mode */
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r9, VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r0, KVM_RADIX(r9)
	cmpwi	cr7, r0, 0

	/*
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Set bit in entry map iff exit map is zero. */
	li	r7, 1
	lbz	r6, HSTATE_PTID(r13)
	sld	r7, r7, r6
	addi	r8, r5, VCORE_ENTRY_EXIT
21:	lwarx	r3, 0, r8
	cmpwi	r3, 0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	or	r3, r3, r7
	stwcx.	r3, 0, r8
	bne	21b

	/* Primary thread switches to guest partition. */
	cmpwi	r6,0
	bne	10f

	/* Radix has already switched LPID and flushed core TLB */
	bne	cr7, 22f

	lwz	r7,KVM_LPID(r9)
BEGIN_FTR_SECTION
	ld	r6,KVM_SDR1(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPID,r7
	isync

	/* See if we need to flush the TLB. Hash has to be done in RM */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
BEGIN_FTR_SECTION
	/*
	 * On POWER9, individual threads can come in here, but the
	 * TLB is shared between the 4 threads in a core, hence
	 * invalidating on one thread invalidates for all.
	 * Thus we make all 4 threads use the same bit here.
	 */
	clrrdi	r6,r6,2
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	add	r6,r6,r9
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
	li	r8,1
	sld	r8,r8,r7
	ld	r7,0(r6)
	and.	r7,r7,r8
	beq	22f
	/* Flush the TLB of any entries for this LPID */
	lwz	r0,KVM_TLB_SETS(r9)
	mtctr	r0
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
	li	r0,0			/* RS for P9 version of tlbiel */
28:	tlbiel	r7			/* On P9, rs=0, RIC=0, PRS=0, R=0 */
	addi	r7,r7,0x1000
	bdnz	28b
	ptesync
23:	ldarx	r7,0,r6			/* clear the bit after TLB flushed */
	andc	r7,r7,r8
	stdcx.	r7,0,r6
	bne	23b

	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	37f
	std	r8, VCORE_TB_OFFSET_APPL(r5)
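	/*
	 * mtspr TBU40 writes only the upper 40 bits of the timebase;
	 * the lower 24 bits keep counting, so if they carried into
	 * bit 40 between the two mftb reads we must bump the upper
	 * bits by one and write them again.
	 */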
	mftb	r6		/* current host timebase */
	add	r8,r8,r6
	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
	mftb	r7		/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	37f
	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
	cmpdi	r7, 0
	beq	38f
	mtspr	SPRN_PCR, r7
38:

BEGIN_FTR_SECTION
	/* DPDES and VTB are shared between threads */
	ld	r8, VCORE_DPDES(r5)
	ld	r7, VCORE_VTB(r5)
	mtspr	SPRN_DPDES, r8
	mtspr	SPRN_VTB, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Mark the subcore state as inside guest */
	bl	kvmppc_subcore_enter_guest
	nop
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r4, HSTATE_KVM_VCPU(r13)
	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Do we have a guest vcpu to run? */
10:	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest
kvmppc_got_guest:
	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	li	r6, LPPACA_YIELDCOUNT
	LWZX_BE	r5, r3, r6
	addi	r5, r5, 1
	STWX_BE	r5, r3, r6
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8

	/* Save host values of some registers */
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_TIDR
	mfspr	r6, SPRN_PSSCR
	mfspr	r7, SPRN_PID
	mfspr	r8, SPRN_IAMR
	std	r5, STACK_SLOT_TID(r1)
	std	r6, STACK_SLOT_PSSCR(r1)
	std	r7, STACK_SLOT_PID(r1)
	std	r8, STACK_SLOT_IAMR(r1)
	mfspr	r5, SPRN_HFSCR
	std	r5, STACK_SLOT_HFSCR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_CIABR
	mfspr	r6, SPRN_DAWR
	mfspr	r7, SPRN_DAWRX
	std	r5, STACK_SLOT_CIABR(r1)
	std	r6, STACK_SLOT_DAWR(r1)
	std	r7, STACK_SLOT_DAWRX(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

BEGIN_FTR_SECTION
	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
	isync
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Branch around the call if both CPU_FTR_TM and
 * CPU_FTR_P9_TM_HV_ASSIST are off.
 */
BEGIN_FTR_SECTION
	b	91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
	 */
	mr      r3, r4
	ld      r4, VCPU_MSR(r3)
	bl	kvmppc_restore_tm_hv
	ld	r4, HSTATE_KVM_VCPU(r13)
91:
#endif

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
BEGIN_FTR_SECTION
	ld	r3, VCPU_MMCR(r4)
	andi.	r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r5, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r7, VCPU_SIAR(r4)
	ld	r8, VCPU_SDAR(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_SIAR, r7
	mtspr	SPRN_SDAR, r8
BEGIN_FTR_SECTION
	ld	r5, VCPU_MMCR + 24(r4)
	ld	r6, VCPU_SIER(r4)
	mtspr	SPRN_MMCR2, r5
	mtspr	SPRN_SIER, r6
BEGIN_FTR_SECTION_NESTED(96)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
	mtspr	SPRN_SPMC1, r7
	mtspr	SPRN_SPMC2, r8
	mtspr	SPRN_MMCRS, r9
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5

BEGIN_FTR_SECTION
	/* Skip next section on POWER7 */
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Load up POWER8-specific registers */
	ld	r5, VCPU_IAMR(r4)
	lwz	r6, VCPU_PSPB(r4)
	ld	r7, VCPU_FSCR(r4)
	mtspr	SPRN_IAMR, r5
	mtspr	SPRN_PSPB, r6
	mtspr	SPRN_FSCR, r7
	ld	r5, VCPU_DAWR(r4)
	ld	r6, VCPU_DAWRX(r4)
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_TAR(r4)
	/*
	 * Handle broken DAWR case by not writing it. This means we
	 * can still store the DAWR register for migration.
	 */
BEGIN_FTR_SECTION
	mtspr	SPRN_DAWR, r5
	mtspr	SPRN_DAWRX, r6
END_FTR_SECTION_IFSET(CPU_FTR_DAWR)
	mtspr	SPRN_CIABR, r7
	mtspr	SPRN_TAR, r8
	ld	r5, VCPU_IC(r4)
	ld	r8, VCPU_EBBHR(r4)
	mtspr	SPRN_IC, r5
	mtspr	SPRN_EBBHR, r8
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
	ld	r8, VCPU_WORT(r4)
	mtspr	SPRN_EBBRR, r5
	mtspr	SPRN_BESCR, r6
	mtspr	SPRN_PID, r7
	mtspr	SPRN_WORT, r8
BEGIN_FTR_SECTION
	/* POWER8-only registers */
	ld	r5, VCPU_TCSCR(r4)
	ld	r6, VCPU_ACOP(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r8, VCPU_TACR(r4)
	mtspr	SPRN_TCSCR, r5
	mtspr	SPRN_ACOP, r6
	mtspr	SPRN_CSIGR, r7
	mtspr	SPRN_TACR, r8
	nop
FTR_SECTION_ELSE
	/* POWER9-only registers */
	ld	r5, VCPU_TID(r4)
	ld	r6, VCPU_PSSCR(r4)
	lbz	r8, HSTATE_FAKE_SUSPEND(r13)
	oris	r6, r6, PSSCR_EC@h	/* This makes stop trap to HV */
	rldimi	r6, r8, PSSCR_FAKE_SUSPEND_LG, 63 - PSSCR_FAKE_SUSPEND_LG
	ld	r7, VCPU_HFSCR(r4)
	mtspr	SPRN_TIDR, r5
	mtspr	SPRN_PSSCR, r6
	mtspr	SPRN_HFSCR, r7
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
8:

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Secondary threads wait for primary to have done partition switch */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, HSTATE_PTID(r13)
	cmpwi	r6, 0
	beq	21f
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	bne	21f
	HMT_LOW
20:	lwz	r3, VCORE_ENTRY_EXIT(r5)
	cmpwi	r3, 0x100
	bge	no_switch_exit
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	20b
	HMT_MEDIUM
21:
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET_APPL(r5)
	add	r8,r8,r6
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3

	/* Check if HDEC expires soon */
	mfspr	r3, SPRN_HDEC
	EXTEND_HDEC(r3)
	cmpdi	r3, 512		/* 1 microsecond */
	blt	hdec_soon

	/* For hash guest, clear out and reload the SLB */
	ld	r6, VCPU_KVM(r4)
	lbz	r0, KVM_RADIX(r6)
	cmpwi	r0, 0
	bne	9f
	li	r6, 0
	slbmte	r6, r6
	slbia
	ptesync

	/* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:

#ifdef CONFIG_KVM_XICS
	/* We are entering the guest on that thread, push VCPU to XIVE */
	ld	r10, HSTATE_XIVE_TIMA_PHYS(r13)
	cmpldi	cr0, r10, 0
	beq	no_xive
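	/*
	 * Push the vcpu context into the OS ring of the XIVE thread
	 * interrupt management area (TIMA); cache-inhibited stores are
	 * used since we address the TIMA by its real address here.
	 */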
	ld	r11, VCPU_XIVE_SAVED_STATE(r4)
	li	r9, TM_QW1_OS
	eieio
	stdcix	r11,r9,r10
	lwz	r11, VCPU_XIVE_CAM_WORD(r4)
	li	r9, TM_QW1_OS + TM_WORD2
	stwcix	r11,r9,r10
	li	r9, 1
	stb	r9, VCPU_XIVE_PUSHED(r4)
	eieio

	/*
	 * We clear the irq_pending flag. There is a small chance of a
	 * race vs. the escalation interrupt happening on another
	 * processor setting it again, but the only consequence is to
	 * cause a spurious wakeup on the next H_CEDE, which is not an
	 * issue.
	 */
	li	r0,0
	stb	r0, VCPU_IRQ_PENDING(r4)

	/*
	 * In single escalation mode, if the escalation interrupt is
	 * on, we mask it.
	 */
	lbz	r0, VCPU_XIVE_ESC_ON(r4)
	cmpwi	r0,0
	beq	1f
	ld	r10, VCPU_XIVE_ESC_RADDR(r4)
	li	r9, XIVE_ESB_SET_PQ_01
	ldcix	r0, r10, r9
	sync

	/* We have a possible subtle race here: The escalation interrupt might
	 * have fired and be on its way to the host queue while we mask it,
	 * and if we unmask it early enough (re-cede right away), there is
	 * a theoretical possibility that it fires again, thus landing in the
	 * target queue more than once which is a big no-no.
	 *
	 * Fortunately, solving this is rather easy. If the above load setting
	 * PQ to 01 returns a previous value where P is set, then we know the
	 * escalation interrupt is somewhere on its way to the host. In that
	 * case we simply don't clear the xive_esc_on flag below. It will be
	 * eventually cleared by the handler for the escalation interrupt.
	 *
	 * Then, when doing a cede, we check that flag again before re-enabling
	 * the escalation interrupt, and if set, we abort the cede.
	 */
	andi.	r0, r0, XIVE_ESB_VAL_P
	bne-	1f

	/* Now P is 0, we can clear the flag */
	li	r0, 0
	stb	r0, VCPU_XIVE_ESC_ON(r4)
1:
no_xive:
#endif /* CONFIG_KVM_XICS */

deliver_guest_interrupt:
	ld	r6, VCPU_CTR(r4)
	ld	r7, VCPU_XER(r4)

	mtctr	r6
	mtxer	r7

kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

	/* r11 = vcpu->arch.msr & ~MSR_HV */
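	/* (rotate MSR_HV up into the MSB, clear it, rotate back) */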
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME

	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	cmpdi	cr1, r0, 0
	andi.	r8, r11, MSR_EE
	mfspr	r8, SPRN_LPCR
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
	mtspr	SPRN_LPCR, r8
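	/*
	 * LPCR[MER] makes the hardware deliver a mediated external
	 * interrupt to the guest as soon as the guest enables MSR_EE.
	 */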
	isync
	beq	5f
	li	r0, BOOK3S_INTERRUPT_EXTERNAL
	bne	cr1, 12f
	mfspr	r0, SPRN_DEC
BEGIN_FTR_SECTION
	/* On POWER9 check whether the guest has large decrementer enabled */
	andis.	r8, r8, LPCR_LD@h
	bne	15f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	extsw	r0, r0
15:	cmpdi	r0, 0
	li	r0, BOOK3S_INTERRUPT_DECREMENTER
	bge	5f

12:	mtspr	SPRN_SRR0, r10
	mr	r10,r0
	mtspr	SPRN_SRR1, r11
	mr	r9, r4
	bl	kvmppc_msr_interrupt
5:
BEGIN_FTR_SECTION
	b	fast_guest_return
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	/* On POWER9, check for pending doorbell requests */
	lbz	r0, VCPU_DBELL_REQ(r4)
	cmpwi	r0, 0
	beq	fast_guest_return
	ld	r5, HSTATE_KVM_VCORE(r13)
	/* Set DPDES register so the CPU will take a doorbell interrupt */
	li	r0, 1
	mtspr	SPRN_DPDES, r0
	std	r0, VCORE_DPDES(r5)
	/* Make sure other cpus see vcore->dpdes set before dbell req clear */
	lwsync
	/* Clear the pending doorbell request */
	li	r0, 0
	stb	r0, VCPU_DBELL_REQ(r4)

/*
 * Required state:
 * R4 = vcpu
 * R10: value for HSRR0
 * R11: value for HSRR1
 * R13 = PACA
 */
fast_guest_return:
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Accumulate timing */
	addi	r3, r4, VCPU_TB_GUEST
	bl	kvmhv_accumulate_time
#endif

	/* Enter guest */

BEGIN_FTR_SECTION
	ld	r5, VCPU_CFAR(r4)
	mtspr	SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r0, VCPU_PPR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)

BEGIN_FTR_SECTION
	mtspr	SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

/* Move canary into DSISR to check for later */
BEGIN_FTR_SECTION
	li	r0, 0x7fff
	mtspr	SPRN_HDSISR, r0
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)
	HRFI_TO_GUEST
	b	.

secondary_too_late:
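	/* Another thread began exiting before we could set our bit in
	 * the entry map, so we never entered the guest; record a zero
	 * trap and rejoin the host-switch path.
	 */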
	li	r12, 0
	stw	r12, STACK_SLOT_TRAP(r1)
	cmpdi	r4, 0
	beq	11f
	stw	r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
11:	b	kvmhv_switch_to_host

no_switch_exit:
	HMT_MEDIUM
	li	r12, 0
	b	12f
hdec_soon:
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
12:	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
	b	guest_bypass

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R12		= (guest CR << 32) | interrupt vector
	 * R13		= PACA
	 * guest R12 saved in shadow VCPU SCRATCH0
	 * guest CTR saved in shadow VCPU SCRATCH1 if RELOCATABLE
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std	r9, HSTATE_SCRATCH2(r13)
	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr
#endif
	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	std	r3, VCPU_GPR(R12)(r9)
	/* CR is in the high half of r12 */
	srdi	r4, r12, 32
	stw	r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	/* trap is in the low half of r12, clear CR from the high half */
	clrldi	r12, r12, 32
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(R13)(r9)
	std	r4, VCPU_LR(r9)

	stw	r12,VCPU_TRAP(r9)

	/*
	 * Now that we have saved away SRR0/1 and HSRR0/1,
	 * interrupts are recoverable in principle, so set MSR_RI.
	 * This becomes important for relocation-on interrupts from
	 * the guest, which we can get in radix mode on POWER9.
	 */
	li	r0, MSR_RI
	mtmsrd	r0, 1

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMINTR
	mr	r4, r9
	bl	kvmhv_accumulate_time
	ld	r5, VCPU_GPR(R5)(r9)
	ld	r6, VCPU_GPR(R6)(r9)
	ld	r7, VCPU_GPR(R7)(r9)
	ld	r8, VCPU_GPR(R8)(r9)
#endif

	/* Save HEIR (HV emulation assist reg) in emul_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
	stw	r3,VCPU_LAST_INST(r9)
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
11:	stw	r3,VCPU_HEIR(r9)

	/* these are volatile across C function calls */
#ifdef CONFIG_RELOCATABLE
	ld	r3, HSTATE_SCRATCH1(r13)
	mtctr	r3
#else
	mfctr	r3
#endif
	mfxer	r4
	std	r3, VCPU_CTR(r9)
	std	r4, VCPU_XER(r9)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* For softpatch interrupt, go off and do TM instruction emulation */
	cmpwi	r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
	beq	kvmppc_tm_emul
#endif

	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	kvmppc_hdsi
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq	kvmppc_hisi

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	EXTEND_HDEC(r3)
	cmpdi	r3,0
	mr	r4,r9
	bge	fast_guest_return
2:
	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Hypervisor doorbell - exit only if host IPI flag set */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
	bne	3f
BEGIN_FTR_SECTION
	PPC_MSGSYNC
	lwsync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	beq	4f
	b	guest_exit_cont
3:
	/* If it's a hypervisor facility unavailable interrupt, save HFSCR */
	cmpwi	r12, BOOK3S_INTERRUPT_H_FAC_UNAVAIL
	bne	14f
	mfspr	r3, SPRN_HFSCR
	std	r3, VCPU_HFSCR(r9)
	b	guest_exit_cont
14:
	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+	guest_exit_cont

	/* External interrupt, first check for host_ipi. If this is
	 * set, we know the host wants us out so let's do it now
	 */
	bl	kvmppc_read_intr

	/*
	 * Restore the active volatile registers after returning from
	 * a C function.
	 */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_EXTERNAL

	/*
	 * kvmppc_read_intr return codes:
	 *
	 * Exit to host (r3 > 0)
	 *   1 An interrupt is pending that needs to be handled by the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *
	 *   2 Passthrough that needs completion in the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *     However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
	 *     to indicate to the host to complete handling the interrupt
	 *
	 * Before returning to guest, we check if any CPU is heading out
	 * to the host and if so, we head out also. If no CPUs are heading
	 * out, we check the return values <= 0.
	 *
	 * Return to guest (r3 <= 0)
	 *  0 No external interrupt is pending
	 * -1 A guest wakeup IPI (which has now been cleared)
	 *    In either case, we return to guest to deliver any pending
	 *    guest interrupts.
	 *
	 * -2 A PCI passthrough external interrupt was handled
	 *    (interrupt was delivered directly to guest)
	 *    Return to guest to deliver any pending guest interrupts.
	 */

	cmpdi	r3, 1
	ble	1f

	/* Return code = 2 */
	li	r12, BOOK3S_INTERRUPT_HV_RM_HARD
	stw	r12, VCPU_TRAP(r9)
	b	guest_exit_cont

1:	/* Return code <= 1 */
	cmpdi	r3, 0
	bgt	guest_exit_cont

	/* Return code <= 0 */
4:	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	mr	r4, r9
	blt	deliver_guest_interrupt

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save more register state  */
	mfdar	r6
	mfdsisr	r7
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	mc_cont
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
mc_cont:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMEXIT
	mr	r4, r9
	bl	kvmhv_accumulate_time
#endif
#ifdef CONFIG_KVM_XICS
	/* We are exiting, pull the VP from the XIVE */
	lbz	r0, VCPU_XIVE_PUSHED(r9)
	cmpwi	cr0, r0, 0
	beq	1f
	li	r7, TM_SPC_PULL_OS_CTX
	li	r6, TM_QW1_OS
	mfmsr	r0
	andi.	r0, r0, MSR_DR		/* in real mode? */
	beq	2f
	ld	r10, HSTATE_XIVE_TIMA_VIRT(r13)
	cmpldi	cr0, r10, 0
	beq	1f
	/* First load to pull the context, we ignore the value */
	eieio
	lwzx	r11, r7, r10
	/* Second load to recover the context state (Words 0 and 1) */
	ldx	r11, r6, r10
	b	3f
2:	ld	r10, HSTATE_XIVE_TIMA_PHYS(r13)
	cmpldi	cr0, r10, 0
	beq	1f
	/* First load to pull the context, we ignore the value */
	eieio
	lwzcix	r11, r7, r10
	/* Second load to recover the context state (Words 0 and 1) */
	ldcix	r11, r6, r10
3:	std	r11, VCPU_XIVE_SAVED_STATE(r9)
	/* Fixup some of the state for the next load */
	li	r10, 0
	li	r0, 0xff
	stb	r10, VCPU_XIVE_PUSHED(r9)
	stb	r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
	stb	r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
	eieio
1:
#endif /* CONFIG_KVM_XICS */

	/* For hash guest, read the guest SLB and save it away */
	ld	r5, VCPU_KVM(r9)
	lbz	r0, KVM_RADIX(r5)
	li	r5, 0
	cmpwi	r0, 0
	bne	3f			/* for radix, save 0 entries */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	/* Finally clear out the SLB */
	li	r0,0
	slbmte	r0,r0
	slbia
	ptesync
3:	stw	r5,VCPU_SLB_MAX(r9)

	/* load host SLB entries */
BEGIN_MMU_FTR_SECTION
	b	0f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr
0:

guest_bypass:
	stw	r12, STACK_SLOT_TRAP(r1)

	/* Save DEC */
	/* Do this before kvmhv_commence_exit so we know TB is guest TB */
	ld	r3, HSTATE_KVM_VCORE(r13)
	mfspr	r5,SPRN_DEC
	mftb	r6
	/* On P9, if the guest has large decr enabled, don't sign extend */
BEGIN_FTR_SECTION
	ld	r4, VCORE_LPCR(r3)
	andis.	r4, r4, LPCR_LD@h
	bne	16f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	extsw	r5,r5
16:	add	r5,r5,r6
	/* r5 is a guest timebase value here, convert to host TB */
	ld	r4,VCORE_TB_OFFSET_APPL(r3)
	subf	r5,r4,r5
	std	r5,VCPU_DEC_EXPIRES(r9)

	/* Increment exit count, poke other threads to exit */
	mr	r3, r12
	bl	kvmhv_commence_exit
	nop
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Stop others sending VCPU interrupts to this physical CPU */
	li	r0, -1
	stw	r0, VCPU_CPU(r9)
	stw	r0, VCPU_THREAD_CPU(r9)

	/* Save guest CTRL register, set runlatch to 1 */
	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/*
	 * Save the guest PURR/SPURR
	 */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4

BEGIN_FTR_SECTION
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Save POWER8-specific registers */
	mfspr	r5, SPRN_IAMR
	mfspr	r6, SPRN_PSPB
	mfspr	r7, SPRN_FSCR
	std	r5, VCPU_IAMR(r9)
	stw	r6, VCPU_PSPB(r9)
	std	r7, VCPU_FSCR(r9)
	mfspr	r5, SPRN_IC
	mfspr	r7, SPRN_TAR
	std	r5, VCPU_IC(r9)
	std	r7, VCPU_TAR(r9)
	mfspr	r8, SPRN_EBBHR
	std	r8, VCPU_EBBHR(r9)
	mfspr	r5, SPRN_EBBRR
	mfspr	r6, SPRN_BESCR
	mfspr	r7, SPRN_PID
	mfspr	r8, SPRN_WORT
	std	r5, VCPU_EBBRR(r9)
	std	r6, VCPU_BESCR(r9)
	stw	r7, VCPU_GUEST_PID(r9)
	std	r8, VCPU_WORT(r9)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_TCSCR
	mfspr	r6, SPRN_ACOP
	mfspr	r7, SPRN_CSIGR
	mfspr	r8, SPRN_TACR
	std	r5, VCPU_TCSCR(r9)
	std	r6, VCPU_ACOP(r9)
	std	r7, VCPU_CSIGR(r9)
	std	r8, VCPU_TACR(r9)
FTR_SECTION_ELSE
	mfspr	r5, SPRN_TIDR
	mfspr	r6, SPRN_PSSCR
	std	r5, VCPU_TID(r9)
	rldicl	r6, r6, 4, 50		/* r6 &= PSSCR_GUEST_VIS */
	rotldi	r6, r6, 60
	std	r6, VCPU_PSSCR(r9)
	/* Restore host HFSCR value */
	ld	r7, STACK_SLOT_HFSCR(r1)
	mtspr	SPRN_HFSCR, r7
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
	/*
	 * Restore various registers to 0, where non-zero values
	 * set by the guest could disrupt the host.
	 */
	li	r0, 0
	mtspr	SPRN_PSPB, r0
	mtspr	SPRN_WORT, r0
BEGIN_FTR_SECTION
	mtspr	SPRN_IAMR, r0
	mtspr	SPRN_TCSCR, r0
	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
	li	r0, 1
	sldi	r0, r0, 31
	mtspr	SPRN_MMCRS, r0
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
8:

	/* Save and reset AMR and UAMOR before turning on the MMU */
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	li	r6,0
	mtspr	SPRN_AMR,r6
	mtspr	SPRN_UAMOR, r6

	/* Switch DSCR back to host value */
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
	mtspr	SPRN_DSCR, r7

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* save FP state */
	mr	r3, r9
	bl	kvmppc_save_fp

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Branch around the call if both CPU_FTR_TM and
 * CPU_FTR_P9_TM_HV_ASSIST are off.
 */
BEGIN_FTR_SECTION
	b	91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
	 */
	mr      r3, r9
	ld      r4, VCPU_MSR(r3)
	bl	kvmppc_save_tm_hv
	ld	r9, HSTATE_KVM_VCPU(r13)
91:
#endif

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
	beq	25f
	li	r4, LPPACA_YIELDCOUNT
	LWZX_BE	r3, r8, r4
	addi	r3, r3, 1
	STWX_BE	r3, r8, r4
	li	r3, 1
	stb	r3, VCPU_VPA_DIRTY(r9)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
BEGIN_FTR_SECTION
	/*
	 * POWER8 seems to have a hardware bug where setting
	 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
	 * when some counters are already negative doesn't seem
	 * to cause a performance monitor alert (and hence interrupt).
	 * The effect of this is that when saving the PMU state,
	 * if there is no PMU alert pending when we read MMCR0
	 * before freezing the counters, but one becomes pending
	 * before we read the counters, we lose it.
	 * To work around this, we need a way to freeze the counters
	 * before reading MMCR0.  Normally, freezing the counters
	 * is done by writing MMCR0 (to set MMCR0[FC]) which
	 * unavoidably writes MMCR0[PMAO] as well.  On POWER8,
	 * we can also freeze the counters using MMCR2, by writing
	 * 1s to all the counter freeze condition bits (there are
	 * 9 bits each for 6 counters).
	 */
	li	r3, -1			/* set all freeze bits */
	clrrdi	r3, r3, 10
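	/* -1 with the low 10 bits cleared = 1s in all 54 freeze bits */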
	mfspr	r10, SPRN_MMCR2
	mtspr	SPRN_MMCR2, r3
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
	/* Clear MMCRA in order to disable SDAR updates */
	li	r7, 0
	mtspr	SPRN_MMCRA, r7
	isync
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	mfspr	r7, SPRN_SIAR
	mfspr	r8, SPRN_SDAR
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
BEGIN_FTR_SECTION
	std	r10, VCPU_MMCR + 24(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_SIER
	std	r5, VCPU_SIER(r9)
BEGIN_FTR_SECTION_NESTED(96)
	mfspr	r6, SPRN_SPMC1
	mfspr	r7, SPRN_SPMC2
	mfspr	r8, SPRN_MMCRS
	stw	r6, VCPU_PMC + 24(r9)
	stw	r7, VCPU_PMC + 28(r9)
	std	r8, VCPU_MMCR + 32(r9)
	lis	r4, 0x8000
	mtspr	SPRN_MMCRS, r4
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
22:

	/* Restore host values of some registers */
BEGIN_FTR_SECTION
	ld	r5, STACK_SLOT_CIABR(r1)
	ld	r6, STACK_SLOT_DAWR(r1)
	ld	r7, STACK_SLOT_DAWRX(r1)
	mtspr	SPRN_CIABR, r5
	/*
	 * If the DAWR doesn't work, it's ok to write these here as
	 * this value should always be zero
	 */
	mtspr	SPRN_DAWR, r6
	mtspr	SPRN_DAWRX, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
BEGIN_FTR_SECTION
	ld	r5, STACK_SLOT_TID(r1)
	ld	r6, STACK_SLOT_PSSCR(r1)
	ld	r7, STACK_SLOT_PID(r1)
	ld	r8, STACK_SLOT_IAMR(r1)
	mtspr	SPRN_TIDR, r5
	mtspr	SPRN_PSSCR, r6
	mtspr	SPRN_PID, r7
	mtspr	SPRN_IAMR, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

#ifdef CONFIG_PPC_RADIX_MMU
	/*
	 * Are we running hash or radix ?
	 */
	ld	r5, VCPU_KVM(r9)
	lbz	r0, KVM_RADIX(r5)
	cmpwi	cr2, r0, 0
	beq	cr2, 2f

	/*
	 * Radix: do eieio; tlbsync; ptesync sequence in case we
	 * interrupted the guest between a tlbie and a ptesync.
	 */
	eieio
	tlbsync
	ptesync

	/* Radix: Handle the case where the guest used an illegal PID */
	LOAD_REG_ADDR(r4, mmu_base_pid)
	lwz	r3, VCPU_GUEST_PID(r9)
	lwz	r5, 0(r4)
	cmpw	cr0,r3,r5
	blt	2f

	/*
	 * Illegal PID, the HW might have prefetched and cached in the TLB
	 * some translations for the LPID 0 / guest PID combination which
	 * Linux doesn't know about, so we need to flush that PID out of
	 * the TLB. First we need to set LPIDR to 0 so tlbiel applies to
	 * the right context.
	 */
	li	r0,0
	mtspr	SPRN_LPID,r0
	isync

	/* Then do a congruence class local flush */
	ld	r6,VCPU_KVM(r9)
	lwz	r0,KVM_TLB_SETS(r6)
	mtctr	r0
	li	r7,0x400		/* IS field = 0b01 */
	ptesync
	sldi	r0,r3,32		/* RS has PID */
1:	PPC_TLBIEL(7,0,2,1,1)		/* RIC=2, PRS=1, R=1 */
	addi	r7,r7,0x1000
	bdnz	1b
	ptesync

1952
2:
1953
#endif /* CONFIG_PPC_RADIX_MMU */

/*
 * POWER7/POWER8 guest -> host partition switch code.
 * We don't have to lock against tlbies but we do
 * have to coordinate the hardware threads.
 * Here STACK_SLOT_TRAP(r1) contains the trap number.
 */
kvmhv_switch_to_host:
	/* Secondary threads wait for primary to do partition switch */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r3,HSTATE_PTID(r13)
	cmpwi	r3,0
	beq	15f
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	b	16f

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	rlwinm	r0,r3,32-8,0xff
	clrldi	r3,r3,56
	cmpw	r3,r0
	bne	15b
	isync

	/* Did we actually switch to the guest at all? */
	lbz	r6, VCORE_IN_GUEST(r5)
	cmpwi	r6, 0
	beq	19f

	/* Primary thread switches back to host partition */
	lwz	r7,KVM_HOST_LPID(r4)
BEGIN_FTR_SECTION
	ld	r6,KVM_HOST_SDR1(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to host page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPID,r7
	isync

BEGIN_FTR_SECTION
	/* DPDES and VTB are shared between threads */
	mfspr	r7, SPRN_DPDES
	mfspr	r8, SPRN_VTB
	std	r7, VCORE_DPDES(r5)
	std	r8, VCORE_VTB(r5)
	/* clear DPDES so we don't get guest doorbells in the host */
	li	r8, 0
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* If HMI, call kvmppc_realmode_hmi_handler() */
	lwz	r12, STACK_SLOT_TRAP(r1)
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	27f
	bl	kvmppc_realmode_hmi_handler
	nop
	cmpdi	r3, 0
	/*
	 * At this point kvmppc_realmode_hmi_handler may have resync-ed
	 * the TB, and if it has, we must not subtract the guest timebase
	 * offset from the timebase. So, skip it.
	 *
	 * Also, do not call kvmppc_subcore_exit_guest() because it has
	 * been invoked as part of kvmppc_realmode_hmi_handler().
	 */
	beq	30f

27:
	/* Subtract timebase offset from timebase */
	ld	r8, VCORE_TB_OFFSET_APPL(r5)
	cmpdi	r8,0
	beq	17f
	li	r0, 0
	std	r0, VCORE_TB_OFFSET_APPL(r5)
	mftb	r6			/* current guest timebase */
	subf	r8,r8,r6
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	17f
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8
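	/*
	 * Note on the fixup above: TBU40 only writes the upper 40 bits
	 * of the timebase, so the offset is effectively applied in units
	 * of 2^24.  If the low 24 bits wrapped between the two mftb
	 * reads, that carry was lost in the TBU40 write; the addis of
	 * 0x100 adds 2^24 to r8, bumping the 40-bit field by one.
	 */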

17:	bl	kvmppc_subcore_exit_guest
	nop
30:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */

	/* Reset PCR */
	ld	r0, VCORE_PCR(r5)
	cmpdi	r0, 0
	beq	18f
	li	r0, 0
	mtspr	SPRN_PCR, r0
18:
	/* Signal secondary CPUs to continue */
	stb	r0,VCORE_IN_GUEST(r5)
19:	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

16:
BEGIN_FTR_SECTION
	/* On POWER9 with HPT-on-radix we need to wait for all other threads */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	47f
	lwz	r8, KVM_SPLIT_DO_RESTORE(r3)
	cmpwi	r8, 0
	beq	47f
	bl	kvmhv_p9_restore_lpcr
	nop
	b	48f
47:
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync
48:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Finish timing, if we have a vcpu */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	li	r3, 0
	beq	2f
	bl	kvmhv_accumulate_time
2:
#endif
	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	lwz	r12, STACK_SLOT_TRAP(r1)	/* return trap # in r12 */
	ld	r0, SFS+PPC_LR_STKOFF(r1)
	addi	r1, r1, SFS
	mtlr	r0
	blr
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Softpatch interrupt for transactional memory emulation cases
 * on POWER9 DD2.2.  This is early in the guest exit path - we
 * haven't saved registers or done a treclaim yet.
 */
kvmppc_tm_emul:
	/* Save instruction image in HEIR */
	mfspr	r3, SPRN_HEIR
	stw	r3, VCPU_HEIR(r9)

	/*
	 * The cases we want to handle here are those where the guest
	 * is in real suspend mode and is trying to transition to
	 * transactional mode.
	 */
	lbz	r0, HSTATE_FAKE_SUSPEND(r13)
	cmpwi	r0, 0		/* keep exiting guest if in fake suspend */
	bne	guest_exit_cont
	rldicl	r3, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r3, 1		/* or if not in suspend state */
	bne	guest_exit_cont

	/* Call C code to do the emulation */
	mr	r3, r9
	bl	kvmhv_p9_tm_emulation_early
	nop
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
	cmpwi	r3, 0
	beq	guest_exit_cont		/* continue exiting if not handled */
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	b	fast_interrupt_c_return	/* go back to guest if handled */
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
kvmppc_hdsi:
	ld	r3, VCPU_KVM(r9)
	lbz	r0, KVM_RADIX(r3)
	mfspr	r4, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
BEGIN_FTR_SECTION
	/* Look for DSISR canary. If we find it, retry instruction */
	cmpdi	r6, 0x7fff
	beq	6f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
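	/*
	 * (The guest entry path on POWER9 is assumed to seed HDSISR with
	 * this 0x7fff canary; reading it back unchanged means the HDSI
	 * arrived with stale fault information, so we simply retry the
	 * instruction rather than reflect a bogus fault.)
	 */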
	cmpwi	r0, 0
	bne	.Lradix_hdsi		/* on radix, just save DAR/DSISR/ASDR */
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	beq	3f
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_ASDR		/* on POWER9, use ASDR to get VSID */
	b	4f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	clrrdi	r0, r4, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_DATA_SEGMENT
	bne	7f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	6f
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont
	cmpdi	r3, -2			/* MMIO emulation; need instr word */
	beq	2f

	/* Synthesize a DSI (or DSegI) for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
	mr	r6, r3
1:	li	r0, BOOK3S_INTERRUPT_DATA_STORAGE
	mtspr	SPRN_DSISR, r6
7:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	mr	r10, r0
	bl	kvmppc_msr_interrupt
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
	ld	r8, VCPU_XER(r9)
	mtctr	r7
	mtxer	r8
	mr	r4, r9
	b	fast_guest_return

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)
	b	4b

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	mfmsr	r3
	ori	r4, r3, MSR_DR		/* Enable paging for data */
	mtmsrd	r4
	lwz	r8, 0(r10)
	mtmsrd	r3

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)
	b	guest_exit_cont

.Lradix_hdsi:
	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)
.Lradix_hisi:
	mfspr	r5, SPRN_ASDR
	std	r5, VCPU_FAULT_GPA(r9)
	b	guest_exit_cont

/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
kvmppc_hisi:
	ld	r3, VCPU_KVM(r9)
	lbz	r0, KVM_RADIX(r3)
	cmpwi	r0, 0
	bne	.Lradix_hisi		/* for radix, just save ASDR */
	andis.	r0, r11, SRR1_ISI_NOPT@h
	beq	1f
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	beq	3f
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_ASDR		/* on POWER9, use ASDR to get VSID */
	b	4f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	clrrdi	r0, r10, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_INST_SEGMENT
	bne	7f			/* if no SLB entry found */
4:
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	mr	r4, r10
	mr	r6, r11
	li	r7, 0			/* instruction fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont

	/* Synthesize an ISI (or ISegI) for the guest */
	mr	r11, r3
1:	li	r0, BOOK3S_INTERRUPT_INST_STORAGE
7:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	mr	r10, r0
	bl	kvmppc_msr_interrupt
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
	b	4b

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 *
 * r5 - r8 contain hcall args,
 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
 */
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	andi.	r0,r11,MSR_PR
	/* sc 1 from userspace - reflect to guest syscall */
	bne	sc_1_fast_return
	clrrdi	r3,r3,2
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	bge	guest_exit_cont
	/* See if this hcall is enabled for in-kernel handling */
	ld	r4, VCPU_KVM(r9)
	srdi	r0, r3, 8	/* r0 = (r3 / 4) >> 6 */
	sldi	r0, r0, 3	/* index into kvm->arch.enabled_hcalls[] */
	add	r4, r4, r0
	ld	r0, KVM_ENABLED_HCALLS(r4)
	rlwinm	r4, r3, 32-2, 0x3f	/* r4 = (r3 / 4) & 0x3f */
	srd	r0, r0, r4
	andi.	r0, r0, 1
	beq	guest_exit_cont
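	/*
	 * (Worked example of the lookup above: H_CEDE is hcall 0xe0,
	 * so r3/4 = 0x38; 0x38 >> 6 = 0 picks dword 0 of
	 * kvm->arch.enabled_hcalls[], and bit 0x38 of that dword
	 * gates the hcall.)
	 */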
	/* Get pointer to handler, if any, and call it */
	LOAD_REG_ADDR(r4, hcall_real_table)
	lwax	r3,r3,r4
	cmpwi	r3,0
	beq	guest_exit_cont
	add	r12,r3,r4
	mtctr	r12
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	bctrl
	cmpdi	r3,H_TOO_HARD
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)
	ld	r10,VCPU_PC(r4)
	ld	r11,VCPU_MSR(r4)
	b	fast_guest_return

sc_1_fast_return:
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	li	r10, BOOK3S_INTERRUPT_SYSCALL
	bl	kvmppc_msr_interrupt
	mr	r4,r9
	b	fast_guest_return

	/* We've attempted a real mode hcall, but it punted it back
	 * to userspace.  We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)

	b	guest_exit_cont

	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	DOTSYM(kvmppc_h_remove) - hcall_real_table
	.long	DOTSYM(kvmppc_h_enter) - hcall_real_table
	.long	DOTSYM(kvmppc_h_read) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
	.long	DOTSYM(kvmppc_h_protect) - hcall_real_table
	.long	DOTSYM(kvmppc_h_get_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
	.long	0		/* 0x2c */
	.long	0		/* 0x30 */
	.long	0		/* 0x34 */
	.long	0		/* 0x38 */
	.long	0		/* 0x3c */
	.long	0		/* 0x40 */
	.long	0		/* 0x44 */
	.long	0		/* 0x48 */
	.long	0		/* 0x4c */
	.long	0		/* 0x50 */
	.long	0		/* 0x54 */
	.long	0		/* 0x58 */
	.long	0		/* 0x5c */
	.long	0		/* 0x60 */
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
#else
	.long	0		/* 0x64 - H_EOI */
	.long	0		/* 0x68 - H_CPPR */
	.long	0		/* 0x6c - H_IPI */
	.long	0		/* 0x70 - H_IPOLL */
	.long	0		/* 0x74 - H_XIRR */
#endif
	.long	0		/* 0x78 */
	.long	0		/* 0x7c */
	.long	0		/* 0x80 */
	.long	0		/* 0x84 */
	.long	0		/* 0x88 */
	.long	0		/* 0x8c */
	.long	0		/* 0x90 */
	.long	0		/* 0x94 */
	.long	0		/* 0x98 */
	.long	0		/* 0x9c */
	.long	0		/* 0xa0 */
	.long	0		/* 0xa4 */
	.long	0		/* 0xa8 */
	.long	0		/* 0xac */
	.long	0		/* 0xb0 */
	.long	0		/* 0xb4 */
	.long	0		/* 0xb8 */
	.long	0		/* 0xbc */
	.long	0		/* 0xc0 */
	.long	0		/* 0xc4 */
	.long	0		/* 0xc8 */
	.long	0		/* 0xcc */
	.long	0		/* 0xd0 */
	.long	0		/* 0xd4 */
	.long	0		/* 0xd8 */
	.long	0		/* 0xdc */
	.long	DOTSYM(kvmppc_h_cede) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
	.long	0		/* 0xe8 */
	.long	0		/* 0xec */
	.long	0		/* 0xf0 */
	.long	0		/* 0xf4 */
	.long	0		/* 0xf8 */
	.long	0		/* 0xfc */
	.long	0		/* 0x100 */
	.long	0		/* 0x104 */
	.long	0		/* 0x108 */
	.long	0		/* 0x10c */
	.long	0		/* 0x110 */
	.long	0		/* 0x114 */
	.long	0		/* 0x118 */
	.long	0		/* 0x11c */
	.long	0		/* 0x120 */
	.long	DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
	.long	0		/* 0x128 */
	.long	0		/* 0x12c */
	.long	0		/* 0x130 */
	.long	DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
	.long	0		/* 0x140 */
	.long	0		/* 0x144 */
	.long	0		/* 0x148 */
	.long	0		/* 0x14c */
	.long	0		/* 0x150 */
	.long	0		/* 0x154 */
	.long	0		/* 0x158 */
	.long	0		/* 0x15c */
	.long	0		/* 0x160 */
	.long	0		/* 0x164 */
	.long	0		/* 0x168 */
	.long	0		/* 0x16c */
	.long	0		/* 0x170 */
	.long	0		/* 0x174 */
	.long	0		/* 0x178 */
	.long	0		/* 0x17c */
	.long	0		/* 0x180 */
	.long	0		/* 0x184 */
	.long	0		/* 0x188 */
	.long	0		/* 0x18c */
	.long	0		/* 0x190 */
	.long	0		/* 0x194 */
	.long	0		/* 0x198 */
	.long	0		/* 0x19c */
	.long	0		/* 0x1a0 */
	.long	0		/* 0x1a4 */
	.long	0		/* 0x1a8 */
	.long	0		/* 0x1ac */
	.long	0		/* 0x1b0 */
	.long	0		/* 0x1b4 */
	.long	0		/* 0x1b8 */
	.long	0		/* 0x1bc */
	.long	0		/* 0x1c0 */
	.long	0		/* 0x1c4 */
	.long	0		/* 0x1c8 */
	.long	0		/* 0x1cc */
	.long	0		/* 0x1d0 */
	.long	0		/* 0x1d4 */
	.long	0		/* 0x1d8 */
	.long	0		/* 0x1dc */
	.long	0		/* 0x1e0 */
	.long	0		/* 0x1e4 */
	.long	0		/* 0x1e8 */
	.long	0		/* 0x1ec */
	.long	0		/* 0x1f0 */
	.long	0		/* 0x1f4 */
	.long	0		/* 0x1f8 */
	.long	0		/* 0x1fc */
	.long	0		/* 0x200 */
	.long	0		/* 0x204 */
	.long	0		/* 0x208 */
	.long	0		/* 0x20c */
	.long	0		/* 0x210 */
	.long	0		/* 0x214 */
	.long	0		/* 0x218 */
	.long	0		/* 0x21c */
	.long	0		/* 0x220 */
	.long	0		/* 0x224 */
	.long	0		/* 0x228 */
	.long	0		/* 0x22c */
	.long	0		/* 0x230 */
	.long	0		/* 0x234 */
	.long	0		/* 0x238 */
	.long	0		/* 0x23c */
	.long	0		/* 0x240 */
	.long	0		/* 0x244 */
	.long	0		/* 0x248 */
	.long	0		/* 0x24c */
	.long	0		/* 0x250 */
	.long	0		/* 0x254 */
	.long	0		/* 0x258 */
	.long	0		/* 0x25c */
	.long	0		/* 0x260 */
	.long	0		/* 0x264 */
	.long	0		/* 0x268 */
	.long	0		/* 0x26c */
	.long	0		/* 0x270 */
	.long	0		/* 0x274 */
	.long	0		/* 0x278 */
	.long	0		/* 0x27c */
	.long	0		/* 0x280 */
	.long	0		/* 0x284 */
	.long	0		/* 0x288 */
	.long	0		/* 0x28c */
	.long	0		/* 0x290 */
	.long	0		/* 0x294 */
	.long	0		/* 0x298 */
	.long	0		/* 0x29c */
	.long	0		/* 0x2a0 */
	.long	0		/* 0x2a4 */
	.long	0		/* 0x2a8 */
	.long	0		/* 0x2ac */
	.long	0		/* 0x2b0 */
	.long	0		/* 0x2b4 */
	.long	0		/* 0x2b8 */
	.long	0		/* 0x2bc */
	.long	0		/* 0x2c0 */
	.long	0		/* 0x2c4 */
	.long	0		/* 0x2c8 */
	.long	0		/* 0x2cc */
	.long	0		/* 0x2d0 */
	.long	0		/* 0x2d4 */
	.long	0		/* 0x2d8 */
	.long	0		/* 0x2dc */
	.long	0		/* 0x2e0 */
	.long	0		/* 0x2e4 */
	.long	0		/* 0x2e8 */
	.long	0		/* 0x2ec */
	.long	0		/* 0x2f0 */
	.long	0		/* 0x2f4 */
	.long	0		/* 0x2f8 */
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table
#else
	.long	0		/* 0x2fc - H_XIRR_X*/
#endif
	.long	DOTSYM(kvmppc_h_random) - hcall_real_table
	.globl	hcall_real_table_end
hcall_real_table_end:
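/*
 * Each table entry above is the 32-bit offset of a real-mode handler
 * from hcall_real_table, indexed directly by hcall number (hcall
 * numbers are multiples of 4, matching the 4-byte entries); a zero
 * entry means the hcall has no real-mode handler.  Storing offsets
 * rather than addresses keeps the table position-independent.
 */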

_GLOBAL(kvmppc_h_set_xdabr)
	andi.	r0, r5, DABRX_USER | DABRX_KERNEL
	beq	6f
	li	r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
	andc.	r0, r5, r0
	beq	3f
6:	li	r3, H_PARAMETER
	blr

_GLOBAL(kvmppc_h_set_dabr)
	li	r5, DABRX_USER | DABRX_KERNEL
3:
BEGIN_FTR_SECTION
	b	2f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r4,VCPU_DABR(r3)
	stw	r5, VCPU_DABRX(r3)
	mtspr	SPRN_DABRX, r5
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4
	mfspr	r5, SPRN_DABR
	cmpd	r4, r5
	bne	1b
	isync
	li	r3,0
	blr

2:
BEGIN_FTR_SECTION
	/* POWER9 with disabled DAWR */
	li	r3, H_HARDWARE
	blr
END_FTR_SECTION_IFCLR(CPU_FTR_DAWR)
	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
	rlwimi	r5, r4, 2, DAWRX_WT
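	/*
	 * (The two rlwimi above are assumed to rotate the DABR's
	 * read/write enable and write-through bits into the positions
	 * DAWRX expects, so the legacy DABR/DABRX pair can be expressed
	 * as a DAWR/DAWRX pair.)
	 */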
	clrrdi	r4, r4, 3
	std	r4, VCPU_DAWR(r3)
	std	r5, VCPU_DAWRX(r3)
	mtspr	SPRN_DAWR, r4
	mtspr	SPRN_DAWRX, r5
	li	r3, 0
	blr

_GLOBAL(kvmppc_h_cede)		/* r3 = vcpu pointer, r11 = msr, r13 = paca */
	ori	r11,r11,MSR_EE
	std	r11,VCPU_MSR(r3)
	li	r0,1
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	cmpwi	r5,0
	bne	kvm_cede_prodded
	li	r12,0		/* set trap to 0 to say hcall is handled */
	stw	r12,VCPU_TRAP(r3)
	li	r0,H_SUCCESS
	std	r0,VCPU_GPR(R3)(r3)

	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r6,HSTATE_PTID(r13)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	clrldi	r8,r8,56
	li	r0,1
	sld	r0,r0,r6
	addi	r6,r5,VCORE_NAPPING_THREADS
31:	lwarx	r4,0,r6
	or	r4,r4,r0
	cmpw	r4,r8
	beq	kvm_cede_exit
	stwcx.	r4,0,r6
	bne	31b
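	/*
	 * The lwarx/stwcx. loop above atomically sets our thread's bit
	 * in napping_threads; if adding our bit would make the napping
	 * map equal the map of threads that entered the guest (r8),
	 * every thread is ceded, so we branch to kvm_cede_exit and hand
	 * control to the host instead of napping.
	 */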
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r0,NAPPING_CEDE
	stb	r0,HSTATE_NAPPING(r13)
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	cmpwi	r7,0x100
	bge	33f		/* another thread already exiting */

/*
 * Although not specifically required by the architecture, POWER7
 * preserves the following registers in nap mode, even if an SMT mode
 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
 */
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)

	/* save FP state */
	bl	kvmppc_save_fp

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Branch around the call if both CPU_FTR_TM and
 * CPU_FTR_P9_TM_HV_ASSIST are off.
 */
BEGIN_FTR_SECTION
	b	91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
	 */
	ld	r3, HSTATE_KVM_VCPU(r13)
	ld	r4, VCPU_MSR(r3)
	bl	kvmppc_save_tm_hv
91:
#endif

	/*
	 * Set DEC to the smaller of DEC and HDEC, so that we wake
	 * no later than the end of our timeslice (HDEC interrupts
	 * don't wake us from nap).
	 */
	mfspr	r3, SPRN_DEC
	mfspr	r4, SPRN_HDEC
	mftb	r5
BEGIN_FTR_SECTION
	/* On P9 check whether the guest has large decrementer mode enabled */
	ld	r6, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_LPCR(r6)
	andis.	r6, r6, LPCR_LD@h
	bne	68f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	extsw	r3, r3
68:	EXTEND_HDEC(r4)
	cmpd	r3, r4
	ble	67f
	mtspr	SPRN_DEC, r4
67:
	/* save expiry time of guest decrementer */
	add	r3, r3, r5
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET_APPL(r5)
	subf	r3, r6, r3	/* convert to host TB value */
	std	r3, VCPU_DEC_EXPIRES(r4)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	addi	r3, r4, VCPU_TB_CEDE
	bl	kvmhv_accumulate_time
#endif

	lis	r3, LPCR_PECEDP@h	/* Do wake on privileged doorbell */

	/*
	 * Take a nap until a decrementer or external or doorbell
	 * interrupt occurs, with PECE1 and PECE0 set in LPCR.
	 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
	 * Also clear the runlatch bit before napping.
	 */
kvm_do_nap:
	mfspr	r0, SPRN_CTRLF
	clrrdi	r0, r0, 1
	mtspr	SPRN_CTRLT, r0

	li	r0,1
	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	mfspr	r5,SPRN_LPCR
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
BEGIN_FTR_SECTION
	ori	r5, r5, LPCR_PECEDH
	rlwimi	r5, r3, 0, LPCR_PECEDP
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

kvm_nap_sequence:		/* desired LPCR value in r5 */
BEGIN_FTR_SECTION
	/*
	 * PSSCR bits:	exit criterion = 1 (wakeup based on LPCR at sreset)
	 *		enable state loss = 1 (allow SMT mode switch)
	 *		requested level = 0 (just stop dispatching)
	 */
	lis	r3, (PSSCR_EC | PSSCR_ESL)@h
	mtspr	SPRN_PSSCR, r3
	/* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
	li	r4, LPCR_PECE_HVEE@higher
	sldi	r4, r4, 32
	or	r5, r5, r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPCR,r5
	isync
	li	r0, 0
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
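	/*
	 * The store/ptesync/load/compare-branch sequence above appears
	 * to follow the recommended recipe for draining stores before a
	 * power-saving instruction: store to scratch, ptesync, load the
	 * same location back, then branch on the loaded value (cmpd
	 * r0,r0 never takes the bne, but forms the data dependency).
	 */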
BEGIN_FTR_SECTION
	nap
FTR_SECTION_ELSE
	PPC_STOP
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
	b	.

33:	mr	r4, r3
	li	r3, 0
	li	r12, 0
	b	34f

kvm_end_cede:
	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMINTR
	bl	kvmhv_accumulate_time
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Branch around the call if both CPU_FTR_TM and
 * CPU_FTR_P9_TM_HV_ASSIST are off.
 */
BEGIN_FTR_SECTION
	b	91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
	 */
	mr	r3, r4
	ld	r4, VCPU_MSR(r3)
	bl	kvmppc_restore_tm_hv
	ld	r4, HSTATE_KVM_VCPU(r13)
91:
#endif

	/* load up FP state */
	bl	kvmppc_load_fp

	/* Restore guest decrementer */
	ld	r3, VCPU_DEC_EXPIRES(r4)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET_APPL(r5)
	add	r3, r3, r6	/* convert host TB to guest TB value */
	mftb	r7
	subf	r3, r7, r3
	mtspr	SPRN_DEC, r3

	/* Load NV GPRS */
	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called a
	 * C routine in kvmppc_check_wake_reason
	 *	r4 = VCPU
	 * r3 tells us whether we need to return to host or not
	 * WARNING: it gets checked further down:
	 * should not modify r3 until this check is done.
	 */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* clear our bit in vcore->napping_threads */
34:	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r7,HSTATE_PTID(r13)
	li	r0,1
	sld	r0,r0,r7
	addi	r6,r5,VCORE_NAPPING_THREADS
32:	lwarx	r7,0,r6
	andc	r7,r7,r0
	stwcx.	r7,0,r6
	bne	32b
	li	r0,0
	stb	r0,HSTATE_NAPPING(r13)

	/* See if the wake reason saved in r3 means we need to exit */
	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
	cmpdi	r3, 0
	bgt	guest_exit_cont

	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)
	cmpwi	r0,0x100
	bge	guest_exit_cont

	b	kvmppc_cede_reentry	/* if not go back to guest */

	/* cede when already previously prodded case */
2884 2885
kvm_cede_prodded:
	li	r0,0
2886 2887 2888 2889 2890 2891 2892
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)
	li	r3,H_SUCCESS
	blr

	/* we've ceded but we want to give control to the host */
kvm_cede_exit:
	ld	r9, HSTATE_KVM_VCPU(r13)
#ifdef CONFIG_KVM_XICS
	/* Abort if we still have a pending escalation */
	lbz	r5, VCPU_XIVE_ESC_ON(r9)
	cmpwi	r5, 0
	beq	1f
	li	r0, 0
	stb	r0, VCPU_CEDED(r9)
1:	/* Enable XIVE escalation */
	li	r5, XIVE_ESB_SET_PQ_00
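	/*
	 * (Per the XIVE ESB programming model, an MMIO load from the
	 * ESB page at the SET_PQ_00 offset atomically sets the PQ bits
	 * to 00, re-enabling escalation; below we choose the cached or
	 * cache-inhibited mapping depending on whether MSR_DR is on.)
	 */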
	mfmsr	r0
	andi.	r0, r0, MSR_DR		/* in real mode? */
	beq	1f
	ld	r10, VCPU_XIVE_ESC_VADDR(r9)
	cmpdi	r10, 0
	beq	3f
	ldx	r0, r10, r5
	b	2f
1:	ld	r10, VCPU_XIVE_ESC_RADDR(r9)
	cmpdi	r10, 0
	beq	3f
	ldcix	r0, r10, r5
2:	sync
	li	r0, 1
	stb	r0, VCPU_XIVE_ESC_ON(r9)
#endif /* CONFIG_KVM_XICS */
3:	b	guest_exit_cont
	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	kvmppc_realmode_machine_check
	nop
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	/*
	 * For a guest that is FWNMI capable, deliver all the MCE errors
	 * (handled/unhandled) by exiting the guest with a KVM_EXIT_NMI
	 * exit reason. This approach injects machine check errors into
	 * the guest address space with additional information in the
	 * form of an RTAS event, enabling the guest kernel to handle
	 * such errors suitably.
	 *
	 * For a guest that is not FWNMI capable (old QEMU), fall back
	 * to the old behaviour for backward compatibility:
	 * Deliver unhandled/fatal (e.g. UE) MCE errors to the guest
	 * through a machine check interrupt (set HSRR0 to 0x200).
	 * For handled (non-fatal) errors, just go back to guest
	 * execution with the current HSRR0.
	 * If we receive a machine check with MSR(RI=0) then deliver it
	 * to the guest as a machine check, causing the guest to crash.
	 */
	ld	r11, VCPU_MSR(r9)
	rldicl.	r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */
	bne	mc_cont			/* if so, exit to host */
	/* Check if guest is capable of handling NMI exit */
	ld	r10, VCPU_KVM(r9)
	lbz	r10, KVM_FWNMI(r10)
	cmpdi	r10, 1			/* FWNMI capable? */
	beq	mc_cont			/* if so, exit with KVM_EXIT_NMI. */

	/* if not, fall through for backward compatibility. */
	andi.	r10, r11, MSR_RI	/* check for unrecoverable exception */
	beq	1f			/* Deliver a machine check to guest */
	ld	r10, VCPU_PC(r9)
	cmpdi	r3, 0		/* Did we handle MCE ? */
	bne	2f	/* Continue guest execution. */
	/* If not, deliver a machine check.  SRR0/1 are already set */
1:	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	bl	kvmppc_msr_interrupt
2:	b	fast_interrupt_c_return

/*
 * Check the reason we woke from nap, and take appropriate action.
 * Returns (in r3):
 *	0 if nothing needs to be done
 *	1 if something happened that needs to be handled by the host
 *	-1 if there was a guest wakeup (IPI or msgsnd)
 *	-2 if we handled a PCI passthrough interrupt (returned by
 *		kvmppc_read_intr only)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 * Modifies all volatile registers (since it may call a C function).
 * This routine calls kvmppc_read_intr, a C function, if an external
 * interrupt is pending.
 */
kvmppc_check_wake_reason:
	mfspr	r6, SPRN_SRR1
BEGIN_FTR_SECTION
	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
FTR_SECTION_ELSE
	rlwinm	r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 8			/* was it an external interrupt? */
	beq	7f			/* if so, see what it was */
	li	r3, 0
	li	r12, 0
	cmpwi	r6, 6			/* was it the decrementer? */
	beq	0f
BEGIN_FTR_SECTION
	cmpwi	r6, 5			/* privileged doorbell? */
	beq	0f
	cmpwi	r6, 3			/* hypervisor doorbell? */
	beq	3f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 0xa			/* Hypervisor maintenance? */
	beq	4f
	li	r3, 1			/* anything else, return 1 */
0:	blr

	/* hypervisor doorbell */
3:	li	r12, BOOK3S_INTERRUPT_H_DOORBELL

	/*
	 * Clear the doorbell as we will invoke the handler
	 * explicitly in the guest exit path.
	 */
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
	/* see if it's a host IPI */
	li	r3, 1
BEGIN_FTR_SECTION
	PPC_MSGSYNC
	lwsync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bnelr
	/* if not, return -1 */
	li	r3, -1
	blr

	/* Woken up due to Hypervisor maintenance interrupt */
4:	li	r12, BOOK3S_INTERRUPT_HMI
	li	r3, 1
	blr

	/* external interrupt - create a stack frame so we can call C */
7:	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -PPC_MIN_STKFRM(r1)
	bl	kvmppc_read_intr
	nop
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	cmpdi	r3, 1
	ble	1f

	/*
	 * Return code of 2 means PCI passthrough interrupt, but
	 * we need to return back to host to complete handling the
	 * interrupt. Trap reason is expected in r12 by guest
	 * exit code.
	 */
	li	r12, BOOK3S_INTERRUPT_HV_RM_HARD
1:
	ld	r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1)
	addi	r1, r1, PPC_MIN_STKFRM
	mtlr	r0
	blr
/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_save_fp:
	mflr	r30
	mr	r31,r3
	mfmsr	r5
	ori	r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	addi	r3,r3,VCPU_FPRS
	bl	store_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	store_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r31)
	mtlr	r30
	blr

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_load_fp:
	mflr	r30
	mr	r31,r4
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	addi	r3,r4,VCPU_FPRS
	bl	load_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	load_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r31)
	mtspr	SPRN_VRSAVE,r7
	mtlr	r30
	mr	r4,r31
	blr

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save transactional state and TM-related registers.
 * Called with r3 pointing to the vcpu struct and r4 containing
 * the guest MSR value.
 * This can modify all checkpointed registers, but
 * restores r1 and r2 before exit.
 */
kvmppc_save_tm_hv:
	/* See if we need to handle fake suspend mode */
BEGIN_FTR_SECTION
	b	__kvmppc_save_tm
END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)

	lbz	r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? */
	cmpwi	r0, 0
	beq	__kvmppc_save_tm

	/* The following code handles the fake_suspend = 1 case */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -PPC_MIN_STKFRM(r1)

	/* Turn on TM. */
	mfmsr	r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r8

	rldicl. r8, r8, 64 - MSR_TS_S_LG, 62 /* Did we actually hrfid? */
	beq	4f
BEGIN_FTR_SECTION
	bl	pnv_power9_force_smt4_catch
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
	nop

	std	r1, HSTATE_HOST_R1(r13)

	/* Clear the MSR RI since r1, r13 may be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	/* We have to treclaim here because that's the only way to do S->N */
	li	r3, TM_CAUSE_KVM_RESCHED
	TRECLAIM(R3)

	/*
	 * We were in fake suspend, so we are not going to save the
	 * register state as the guest checkpointed state (since
	 * we already have it), therefore we can now use any volatile GPR.
	 */
	/* Reload PACA pointer, stack pointer and TOC. */
	GET_PACA(r13)
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	/* Set MSR RI now we have r1 and r13 back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1

	HMT_MEDIUM
	ld	r6, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r6
BEGIN_FTR_SECTION_NESTED(96)
	bl	pnv_power9_force_smt4_release
END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96)
	nop

4:
	mfspr	r3, SPRN_PSSCR
	/* PSSCR_FAKE_SUSPEND is a write-only bit, but clear it anyway */
	li	r0, PSSCR_FAKE_SUSPEND
	andc	r3, r3, r0
	mtspr	SPRN_PSSCR, r3

	/* Don't save TEXASR, use value from last exit in real suspend state */
	ld	r9, HSTATE_KVM_VCPU(r13)
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)

	addi	r1, r1, PPC_MIN_STKFRM
	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr

/*
 * Restore transactional state and TM-related registers.
 * Called with r3 pointing to the vcpu struct
 * and r4 containing the guest MSR value.
 * This potentially modifies all checkpointed registers.
 * It restores r1 and r2 from the PACA.
 */
kvmppc_restore_tm_hv:
	/*
	 * If we are doing TM emulation for the guest on a POWER9 DD2,
	 * then we don't actually do a trechkpt -- we either set up
	 * fake-suspend mode, or emulate a TM rollback.
	 */
BEGIN_FTR_SECTION
	b	__kvmppc_restore_tm
END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)

	li	r0, 0
	stb	r0, HSTATE_FAKE_SUSPEND(r13)

	/* Turn on TM so we can restore TM SPRs */
	mfmsr	r5
	li	r0, 1
	rldimi	r5, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r5

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r3)
	ld	r6, VCPU_TFIAR(r3)
	ld	r7, VCPU_TEXASR(r3)
	mtspr	SPRN_TFHAR, r5
	mtspr	SPRN_TFIAR, r6
	mtspr	SPRN_TEXASR, r7

	rldicl. r5, r4, 64 - MSR_TS_S_LG, 62
	beqlr		/* TM not active in guest */

	/* Make sure the failure summary is set */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7

	cmpwi	r5, 1		/* check for suspended state */
	bgt	10f
	stb	r5, HSTATE_FAKE_SUSPEND(r13)
	b	9f		/* and return */
10:	stdu	r1, -PPC_MIN_STKFRM(r1)
	/* guest is in transactional state, so simulate rollback */
	bl	kvmhv_emulate_tm_rollback
	nop
	addi	r1, r1, PPC_MIN_STKFRM
9:	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * r12 is (CR << 32) | vector
 * r13 points to our PACA
 * r12 is saved in HSTATE_SCRATCH0(r13)
 * ctr is saved in HSTATE_SCRATCH1(r13) if RELOCATABLE
 * r9 is saved in HSTATE_SCRATCH2(r13)
 * r13 is saved in HSPRG1
 * cfar is saved in HSTATE_CFAR(r13)
 * ppr is saved in HSTATE_PPR(r13)
 */
kvmppc_bad_host_intr:
	/*
	 * Switch to the emergency stack, but start half-way down in
	 * case we were already on it.
	 */
	mr	r9, r1
	std	r1, PACAR1(r13)
	ld	r1, PACAEMERGSP(r13)
	subi	r1, r1, THREAD_SIZE/2 + INT_FRAME_SIZE
	std	r9, 0(r1)
	std	r0, GPR0(r1)
	std	r9, GPR1(r1)
	std	r2, GPR2(r1)
	SAVE_4GPRS(3, r1)
	SAVE_2GPRS(7, r1)
	srdi	r0, r12, 32
	clrldi	r12, r12, 32
	std	r0, _CCR(r1)
	std	r12, _TRAP(r1)
	andi.	r0, r12, 2
	beq	1f
	mfspr	r3, SPRN_HSRR0
	mfspr	r4, SPRN_HSRR1
	mfspr	r5, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
	b	2f
1:	mfspr	r3, SPRN_SRR0
	mfspr	r4, SPRN_SRR1
	mfspr	r5, SPRN_DAR
	mfspr	r6, SPRN_DSISR
2:	std	r3, _NIP(r1)
	std	r4, _MSR(r1)
	std	r5, _DAR(r1)
	std	r6, _DSISR(r1)
	ld	r9, HSTATE_SCRATCH2(r13)
	ld	r12, HSTATE_SCRATCH0(r13)
	GET_SCRATCH0(r0)
	SAVE_4GPRS(9, r1)
	std	r0, GPR13(r1)
	SAVE_NVGPRS(r1)
	ld	r5, HSTATE_CFAR(r13)
	std	r5, ORIG_GPR3(r1)
	mflr	r3
#ifdef CONFIG_RELOCATABLE
	ld	r4, HSTATE_SCRATCH1(r13)
#else
	mfctr	r4
#endif
	mfxer	r5
	lbz	r6, PACAIRQSOFTMASK(r13)
	std	r3, _LINK(r1)
	std	r4, _CTR(r1)
	std	r5, _XER(r1)
	std	r6, SOFTE(r1)
	ld	r2, PACATOC(r13)
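	/* 0x7265677368657265 is ASCII "regshere": a marker stored below
	 * the frame to flag that a full register save area is present */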
	LOAD_REG_IMMEDIATE(3, 0x7265677368657265)
	std	r3, STACK_FRAME_OVERHEAD-16(r1)

	/*
	 * On POWER9 do a minimal restore of the MMU and call C code,
	 * which will print a message and panic.
	 * XXX On POWER7 and POWER8, we just spin here since we don't
	 * know what the other threads are doing (and we don't want to
	 * coordinate with them) - but at least we now have register state
	 * in memory that we might be able to look at from another CPU.
	 */
BEGIN_FTR_SECTION
	b	.
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_KVM(r9)

	li	r0, 0
	mtspr	SPRN_AMR, r0
	mtspr	SPRN_IAMR, r0
	mtspr	SPRN_CIABR, r0
	mtspr	SPRN_DAWRX, r0

BEGIN_MMU_FTR_SECTION
	b	4f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)

	slbmte	r0, r0
	slbia
	ptesync
	ld	r8, PACA_SLBSHADOWPTR(r13)
	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7, r5, SLB_ESID_V@h
	beq	3f
	slbmte	r6, r5
3:	addi	r8, r8, 16
	.endr

4:	lwz	r7, KVM_HOST_LPID(r10)
	mtspr	SPRN_LPID, r7
	mtspr	SPRN_PID, r0
	ld	r8, KVM_HOST_LPCR(r10)
	mtspr	SPRN_LPCR, r8
	isync
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	/*
	 * Turn on the MMU and jump to C code
	 */
	bcl	20, 31, .+4
5:	mflr	r3
	addi	r3, r3, 9f - 5b
	li	r4, -1
	rldimi	r3, r4, 62, 0	/* ensure 0xc000000000000000 bits are set */
	ld	r4, PACAKMSR(r13)
	mtspr	SPRN_SRR0, r3
	mtspr	SPRN_SRR1, r4
	RFI_TO_KERNEL
9:	addi	r3, r1, STACK_FRAME_OVERHEAD
	bl	kvmppc_bad_interrupt
	b	9b

/*
 * This mimics the MSR transition on IRQ delivery.  The new guest MSR is taken
 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
 *   r11 has the guest MSR value (in/out)
 *   r9 has a vcpu pointer (in)
 *   r0 is used as a scratch register
 */
kvmppc_msr_interrupt:
	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r0, 2 /* Check if we are in transactional state... */
	ld	r11, VCPU_INTR_MSR(r9)
	bne	1f
	/* ... if transactional, change to suspended */
	li	r0, 1
1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	blr

/*
 * This works around a hardware bug on POWER8E processors, where
 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
 * performance monitor interrupt.  Instead, when we need to have
 * an interrupt pending, we have to arrange for a counter to overflow.
 */
kvmppc_fix_pmao:
	li	r3, 0
	mtspr	SPRN_MMCR2, r3
	lis	r3, (MMCR0_PMXE | MMCR0_FCECE)@h
	ori	r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
	mtspr	SPRN_MMCR0, r3
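	/*
	 * Load PMC6 with 0x7fffffff: the next counted event carries
	 * into bit 31 (counter negative), which with PMCjCE set raises
	 * the pending performance monitor interrupt we need.
	 */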
	lis	r3, 0x7fff
	ori	r3, r3, 0xffff
	mtspr	SPRN_PMC6, r3
	isync
	blr

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
/*
 * Start timing an activity
 * r3 = pointer to time accumulation struct, r4 = vcpu
 */
kvmhv_start_timing:
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET_APPL(r5)
	mftb	r5
	subf	r5, r6, r5	/* subtract current timebase offset */
	std	r3, VCPU_CUR_ACTIVITY(r4)
	std	r5, VCPU_ACTIVITY_START(r4)
	blr

/*
 * Accumulate time to one activity and start another.
 * r3 = pointer to new time accumulation struct, r4 = vcpu
 */
kvmhv_accumulate_time:
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r8, VCORE_TB_OFFSET_APPL(r5)
	ld	r5, VCPU_CUR_ACTIVITY(r4)
	ld	r6, VCPU_ACTIVITY_START(r4)
	std	r3, VCPU_CUR_ACTIVITY(r4)
	mftb	r7
	subf	r7, r8, r7	/* subtract current timebase offset */
	std	r7, VCPU_ACTIVITY_START(r4)
	cmpdi	r5, 0
	beqlr
	subf	r3, r6, r7
	ld	r8, TAS_SEQCOUNT(r5)
	cmpdi	r8, 0
	addi	r8, r8, 1
	std	r8, TAS_SEQCOUNT(r5)
	lwsync
	ld	r7, TAS_TOTAL(r5)
	add	r7, r7, r3
	std	r7, TAS_TOTAL(r5)
	ld	r6, TAS_MIN(r5)
	ld	r7, TAS_MAX(r5)
	beq	3f
	cmpd	r3, r6
	bge	1f
3:	std	r3, TAS_MIN(r5)
1:	cmpd	r3, r7
	ble	2f
	std	r3, TAS_MAX(r5)
2:	lwsync
	addi	r8, r8, 1
	std	r8, TAS_SEQCOUNT(r5)
	blr
#endif