/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/tm.h>
#include <asm/opal.h>
#include <asm/xive-regs.h>
#include <asm/thread_info.h>

/* Sign-extend HDEC if not on POWER9 */
#define EXTEND_HDEC(reg)			\
BEGIN_FTR_SECTION;				\
	extsw	reg, reg;			\
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
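/*
 * A brief note on the macro above: on ISA v3.00 (POWER9) the hypervisor
 * decrementer is wider than 32 bits, so its value can be used as-is;
 * on earlier CPUs HDEC is a 32-bit quantity and must be sign-extended
 * before 64-bit comparisons.
 */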

#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2

/* Stack frame offsets for kvmppc_hv_entry */
#define SFS			160
#define STACK_SLOT_TRAP		(SFS-4)
#define STACK_SLOT_TID		(SFS-16)
#define STACK_SLOT_PSSCR	(SFS-24)
#define STACK_SLOT_PID		(SFS-32)
#define STACK_SLOT_IAMR		(SFS-40)
#define STACK_SLOT_CIABR	(SFS-48)
#define STACK_SLOT_DAWR		(SFS-56)
#define STACK_SLOT_DAWRX	(SFS-64)
#define STACK_SLOT_HFSCR	(SFS-72)

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
	mfmsr	r10
	std	r10, HSTATE_HOST_MSR(r13)
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
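	/*
	 * SRR0/SRR1 now hold the real-mode entry point and an MSR value
	 * with IR/DR cleared, so the RFI below transfers control to
	 * kvmppc_call_hv_entry with the MMU off.
	 */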
	RFI_TO_KERNEL

kvmppc_call_hv_entry:
BEGIN_FTR_SECTION
	/* On P9, do LPCR setting, if necessary */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	46f
	lwz	r4, KVM_SPLIT_DO_SET(r3)
	cmpwi	r4, 0
	beq	46f
	bl	kvmhv_p9_set_lpcr
	nop
46:
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

BEGIN_FTR_SECTION
	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	lbz	r4, PACA_PMCINUSE(r13) /* is the host using the PMU? */
	cmpwi	r4, 0
	beq	23f			/* skip if not */
BEGIN_FTR_SECTION
	ld	r3, HSTATE_MMCR0(r13)
	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r4, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC1(r13)
	lwz	r4, HSTATE_PMC2(r13)
	lwz	r5, HSTATE_PMC3(r13)
	lwz	r6, HSTATE_PMC4(r13)
	lwz	r8, HSTATE_PMC5(r13)
	lwz	r9, HSTATE_PMC6(r13)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, HSTATE_MMCR0(r13)
	ld	r4, HSTATE_MMCR1(r13)
	ld	r5, HSTATE_MMCRA(r13)
	ld	r6, HSTATE_SIAR(r13)
	ld	r7, HSTATE_SDAR(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_SIAR, r6
	mtspr	SPRN_SDAR, r7
BEGIN_FTR_SECTION
	ld	r8, HSTATE_MMCR2(r13)
	ld	r9, HSTATE_SIER(r13)
	mtspr	SPRN_MMCR2, r8
	mtspr	SPRN_SIER, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync
23:

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/* hwthread_req may have got set by cede or no vcpu, so clear it */
	li	r0, 0
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/*
	 * For external interrupts we need to call the Linux
	 * handler to process the interrupt. We do that by jumping
	 * to absolute address 0x500 for external interrupts.
	 * The [h]rfid at the end of the handler will return to
	 * the book3s_hv_interrupts.S code. For other interrupts
	 * we do the rfid to get back to the book3s_hv_interrupts.S
	 * code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	ld	r7, HSTATE_HOST_MSR(r13)

	/* Return the trap number on this thread as the return value */
	mr	r3, r12

	/*
	 * If we came back from the guest via a relocation-on interrupt,
	 * we will be in virtual mode at this point, which makes it a
	 * little easier to get back to the caller.
	 */
	mfmsr	r0
	andi.	r0, r0, MSR_IR		/* in real mode? */
	bne	.Lvirt_return

	/* RFI into the highmem handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	RFI_TO_KERNEL

	/* Virtual-mode return */
.Lvirt_return:
	mtlr	r8
	blr

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
	/* HDEC may be larger than DEC for arch >= v3.00, but since the */
	/* HDEC value came from DEC in the first place, it will fit */
	mfspr	r3, SPRN_HDEC
	mtspr	SPRN_DEC, r3
	/*
	 * Make sure the primary has finished the MMU switch.
	 * We should never get here on a secondary thread, but
	 * check it for robustness' sake.
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)
65:	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	65b
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
1:	lwarx	r3, 0, r6
	or	r3, r3, r0
	stwcx.	r3, 0, r6
	bne	1b
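	/*
	 * The lwarx/stwcx. loop above atomically ORs this thread's bit
	 * into vcore->napping_threads, retrying if another thread updated
	 * the word in the meantime.
	 */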
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r12, 0
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	cmpwi	r7, 0x100
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)

	li	r3, 0		/* Don't wake on privileged (OS) doorbell */
	b	kvm_do_nap

/*
 * kvm_novcpu_wakeup
 *	Entered from kvm_start_guest if kvm_hstate.napping is set
 *	to NAPPING_NOVCPU
 *		r2 = kernel TOC
 *		r13 = paca
 */
kvm_novcpu_wakeup:
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called
	 * a C routine in kvmppc_check_wake_reason.
	 *	r5 = VCORE
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	kvm_novcpu_exit

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
4:	lwarx	r7, 0, r6
	andc	r7, r7, r0
	stwcx.	r7, 0, r6
	bne	4b

	/* See if the wake reason means we need to exit */
	cmpdi	r3, 0
	bge	kvm_novcpu_exit

	/* See if our timeslice has expired (HDEC is negative) */
	mfspr	r0, SPRN_HDEC
	EXTEND_HDEC(r0)
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
	cmpdi	r0, 0
	blt	kvm_novcpu_exit

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
#endif
	b	kvmppc_got_guest

kvm_novcpu_exit:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	13f
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
13:	mr	r3, r12
	stw	r12, STACK_SLOT_TRAP(r1)
	bl	kvmhv_commence_exit
	nop
	lwz	r12, STACK_SLOT_TRAP(r1)
	b	kvmhv_switch_to_host

/*
 * We come in here when wakened from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 * r3 contains the SRR1 wakeup value, SRR1 is trashed.
 */
	.globl	kvm_start_guest
kvm_start_guest:
	/* Set runlatch bit the minute you wake up from nap */
	mfspr	r0, SPRN_CTRLF
	ori 	r0, r0, 1
	mtspr	SPRN_CTRLT, r0

	/*
	 * Could avoid this and pass it through in r3. For now,
	 * code expects it to be in SRR1.
	 */
	mtspr	SPRN_SRR1,r3

	ld	r2,PACATOC(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	beq	kvm_end_cede
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup

	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	/*
	 * kvmppc_check_wake_reason could invoke a C routine, but we
	 * have no volatile registers to restore when we return.
	 */

	cmpdi	r3, 0
	bge	kvm_no_guest

	/* get vcore pointer, NULL if we have nothing to run */
	ld	r5,HSTATE_KVM_VCORE(r13)
	cmpdi	r5,0
	/* if we have no vcore to run, go back to sleep */
	beq	kvm_no_guest

kvm_secondary_got_guest:

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR_DEFAULT(r13)
	std	r6, HSTATE_DSCR(r13)

	/* On thread 0 of a subcore, set HDEC to max */
	lbz	r4, HSTATE_PTID(r13)
	cmpwi	r4, 0
	bne	63f
	LOAD_REG_ADDR(r6, decrementer_max)
	ld	r6, 0(r6)
	mtspr	SPRN_HDEC, r6
	/* and set per-LPAR registers, if doing dynamic micro-threading */
	ld	r6, HSTATE_SPLIT_MODE(r13)
	cmpdi	r6, 0
	beq	63f
BEGIN_FTR_SECTION
	ld	r0, KVM_SPLIT_RPR(r6)
	mtspr	SPRN_RPR, r0
	ld	r0, KVM_SPLIT_PMMAR(r6)
	mtspr	SPRN_PMMAR, r0
	ld	r0, KVM_SPLIT_LDBAR(r6)
	mtspr	SPRN_LDBAR, r0
	isync
FTR_SECTION_ELSE
	/* On P9 we use the split_info for coordinating LPCR changes */
	lwz	r4, KVM_SPLIT_DO_SET(r6)
	cmpwi	r4, 0
	beq	1f
	mr	r3, r6
	bl	kvmhv_p9_set_lpcr
	nop
1:
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
63:
	/* Order load of vcpu after load of vcore */
	lwsync
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu and vcore pointers so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Once we clear HSTATE_KVM_VCORE(r13), the code in
	 * kvmppc_run_core() is going to assume that all our vcpu
	 * state is visible in memory.  This lwsync makes sure
	 * that that is true.
	 */
	lwsync
	std	r0, HSTATE_KVM_VCORE(r13)

	/*
	 * All secondaries exiting guest will fall through this path.
	 * Before proceeding, just check for HMI interrupt and
	 * invoke opal hmi handler. By now we are sure that the
	 * primary thread on this core/subcore has already made partition
	 * switch/TB resync and we are good to call opal hmi handler.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	kvm_no_guest

	li	r3,0			/* NULL argument */
	bl	hmi_exception_realmode
/*
 * At this point we have finished executing in the guest.
 * We need to wait for hwthread_req to become zero, since
 * we may not turn on the MMU while hwthread_req is non-zero.
 * While waiting we also need to check if we get given a vcpu to run.
 */
kvm_no_guest:
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	53f
	HMT_MEDIUM
	li	r0, KVM_HWTHREAD_IN_KERNEL
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	sync
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	54f
/*
 * We jump to pnv_wakeup_loss, which will return to the caller
 * of power7_nap in the powernv cpu offline loop.  The value we
 * put in r3 becomes the return value for power7_nap. pnv_wakeup_loss
 * requires SRR1 in r12.
 */
	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	li	r3, 0
	mfspr	r12,SPRN_SRR1
	b	pnv_wakeup_loss

53:	HMT_LOW
	ld	r5, HSTATE_KVM_VCORE(r13)
	cmpdi	r5, 0
	bne	60f
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	kvm_no_guest
	lwz	r0, KVM_SPLIT_DO_SET(r3)
	cmpwi	r0, 0
	bne	kvmhv_do_set
	lwz	r0, KVM_SPLIT_DO_RESTORE(r3)
	cmpwi	r0, 0
	bne	kvmhv_do_restore
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	kvm_no_guest
	HMT_MEDIUM
	b	kvm_unsplit_nap
60:	HMT_MEDIUM
	b	kvm_secondary_got_guest

54:	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	b	kvm_no_guest

kvmhv_do_set:
	/* Set LPCR, LPIDR etc. on P9 */
	HMT_MEDIUM
	bl	kvmhv_p9_set_lpcr
	nop
	b	kvm_no_guest

kvmhv_do_restore:
	HMT_MEDIUM
	bl	kvmhv_p9_restore_lpcr
	nop
	b	kvm_no_guest

/*
 * Here the primary thread is trying to return the core to
 * whole-core mode, so we need to nap.
 */
kvm_unsplit_nap:
	/*
	 * When secondaries are napping in kvm_unsplit_nap() with
	 * hwthread_req = 1, an HMI is ignored even though the subcores
	 * have already exited the guest. The HMI then keeps waking the
	 * secondaries from nap in a loop, and they always go back to nap
	 * since no vcore is assigned to them. This makes it impossible for
	 * the primary thread to get hold of the secondary threads,
	 * resulting in a soft lockup in the KVM path.
	 *
	 * Let us check if HMI is pending and handle it before we go to nap.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	55f
	li	r3, 0			/* NULL argument */
	bl	hmi_exception_realmode
55:
	/*
	 * Ensure that secondary doesn't nap when it has
	 * its vcore pointer set.
	 */
	sync		/* matches smp_mb() before setting split_info.do_nap */
	ld	r0, HSTATE_KVM_VCORE(r13)
	cmpdi	r0, 0
	bne	kvm_no_guest
	/* clear any pending message */
BEGIN_FTR_SECTION
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Set kvm_split_mode.napped[tid] = 1 */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	li	r0, 1
	lbz	r4, HSTATE_TID(r13)
	addi	r4, r4, KVM_SPLIT_NAPPED
	stbx	r0, r3, r4
	/* Check the do_nap flag again after setting napped[] */
	sync
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	57f
	li	r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
	mfspr	r5, SPRN_LPCR
	rlwimi	r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
	b	kvm_nap_sequence

57:	li	r0, 0
	stbx	r0, r3, r4
	b	kvm_no_guest

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer (or NULL)
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = TOC
	 * all other volatile GPRS = free
	 * Does not preserve non-volatile GPRs or CR fields
	 */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -SFS(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Store initial timestamp */
	cmpdi	r4, 0
	beq	1f
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
1:
#endif

	/* Use cr7 as an indication of radix mode */
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r9, VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r0, KVM_RADIX(r9)
	cmpwi	cr7, r0, 0

	/*
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Set bit in entry map iff exit map is zero. */
	li	r7, 1
	lbz	r6, HSTATE_PTID(r13)
	sld	r7, r7, r6
	addi	r8, r5, VCORE_ENTRY_EXIT
21:	lwarx	r3, 0, r8
	cmpwi	r3, 0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	or	r3, r3, r7
	stwcx.	r3, 0, r8
	bne	21b
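	/*
	 * A rough sketch of the protocol above: the low byte of
	 * vcore->entry_exit_map is a bitmap of threads that have entered,
	 * and a value of 0x100 or more means some thread has already begun
	 * the exit sequence, so a late arrival bails out via
	 * secondary_too_late instead of entering the guest.
	 */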

	/* Primary thread switches to guest partition. */
	cmpwi	r6,0
	bne	10f
	lwz	r7,KVM_LPID(r9)
BEGIN_FTR_SECTION
	ld	r6,KVM_SDR1(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPID,r7
	isync

	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
BEGIN_FTR_SECTION
	/*
	 * On POWER9, individual threads can come in here, but the
	 * TLB is shared between the 4 threads in a core, hence
	 * invalidating on one thread invalidates for all.
	 * Thus we make all 4 threads use the same bit here.
	 */
	clrrdi	r6,r6,2
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	add	r6,r6,r9
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
	li	r8,1
	sld	r8,r8,r7
	ld	r7,0(r6)
	and.	r7,r7,r8
	beq	22f
	/* Flush the TLB of any entries for this LPID */
	lwz	r0,KVM_TLB_SETS(r9)
	mtctr	r0
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
	li	r0,0			/* RS for P9 version of tlbiel */
	bne	cr7, 29f
28:	tlbiel	r7			/* On P9, rs=0, RIC=0, PRS=0, R=0 */
	addi	r7,r7,0x1000
	bdnz	28b
	b	30f
29:	PPC_TLBIEL(7,0,2,1,1)		/* for radix, RIC=2, PRS=1, R=1 */
	addi	r7,r7,0x1000
	bdnz	29b
30:	ptesync
23:	ldarx	r7,0,r6			/* clear the bit after TLB flushed */
	andc	r7,r7,r8
	stdcx.	r7,0,r6
	bne	23b

	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	37f
	mftb	r6		/* current host timebase */
	add	r8,r8,r6
	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
	mftb	r7		/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	37f
	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8
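	/*
	 * TBU40 writes only the upper 40 bits of the timebase, so the two
	 * mftb reads above detect whether the low 24 bits wrapped while
	 * the update was in progress; if they did, the upper 40 bits are
	 * bumped by one (addis ...,0x100 adds 1 at bit 24).
	 */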

	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
	cmpdi	r7, 0
	beq	38f
	mtspr	SPRN_PCR, r7
38:

BEGIN_FTR_SECTION
	/* DPDES and VTB are shared between threads */
	ld	r8, VCORE_DPDES(r5)
	ld	r7, VCORE_VTB(r5)
	mtspr	SPRN_DPDES, r8
	mtspr	SPRN_VTB, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Mark the subcore state as inside guest */
	bl	kvmppc_subcore_enter_guest
	nop
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r4, HSTATE_KVM_VCPU(r13)
	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Do we have a guest vcpu to run? */
10:	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest
kvmppc_got_guest:
	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	li	r6, LPPACA_YIELDCOUNT
	LWZX_BE	r5, r3, r6
	addi	r5, r5, 1
	STWX_BE	r5, r3, r6
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8

	/* Save host values of some registers */
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_TIDR
	mfspr	r6, SPRN_PSSCR
	mfspr	r7, SPRN_PID
	mfspr	r8, SPRN_IAMR
	std	r5, STACK_SLOT_TID(r1)
	std	r6, STACK_SLOT_PSSCR(r1)
	std	r7, STACK_SLOT_PID(r1)
	std	r8, STACK_SLOT_IAMR(r1)
	mfspr	r5, SPRN_HFSCR
	std	r5, STACK_SLOT_HFSCR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_CIABR
	mfspr	r6, SPRN_DAWR
	mfspr	r7, SPRN_DAWRX
	std	r5, STACK_SLOT_CIABR(r1)
	std	r6, STACK_SLOT_DAWR(r1)
	std	r7, STACK_SLOT_DAWRX(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

BEGIN_FTR_SECTION
	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
	isync
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Branch around the call if both CPU_FTR_TM and
 * CPU_FTR_P9_TM_HV_ASSIST are off.
 */
BEGIN_FTR_SECTION
	b	91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
	 */
	bl	kvmppc_restore_tm
91:
#endif

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
BEGIN_FTR_SECTION
	ld	r3, VCPU_MMCR(r4)
	andi.	r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r5, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r7, VCPU_SIAR(r4)
	ld	r8, VCPU_SDAR(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_SIAR, r7
	mtspr	SPRN_SDAR, r8
BEGIN_FTR_SECTION
	ld	r5, VCPU_MMCR + 24(r4)
	ld	r6, VCPU_SIER(r4)
	mtspr	SPRN_MMCR2, r5
	mtspr	SPRN_SIER, r6
BEGIN_FTR_SECTION_NESTED(96)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
	mtspr	SPRN_SPMC1, r7
	mtspr	SPRN_SPMC2, r8
	mtspr	SPRN_MMCRS, r9
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5

BEGIN_FTR_SECTION
	/* Skip next section on POWER7 */
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Load up POWER8-specific registers */
	ld	r5, VCPU_IAMR(r4)
	lwz	r6, VCPU_PSPB(r4)
	ld	r7, VCPU_FSCR(r4)
	mtspr	SPRN_IAMR, r5
	mtspr	SPRN_PSPB, r6
	mtspr	SPRN_FSCR, r7
	ld	r5, VCPU_DAWR(r4)
	ld	r6, VCPU_DAWRX(r4)
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_TAR(r4)
	/*
	 * Handle broken DAWR case by not writing it. This means we
	 * can still store the DAWR register for migration.
	 */
BEGIN_FTR_SECTION
	mtspr	SPRN_DAWR, r5
	mtspr	SPRN_DAWRX, r6
END_FTR_SECTION_IFSET(CPU_FTR_DAWR)
	mtspr	SPRN_CIABR, r7
	mtspr	SPRN_TAR, r8
	ld	r5, VCPU_IC(r4)
	ld	r8, VCPU_EBBHR(r4)
	mtspr	SPRN_IC, r5
	mtspr	SPRN_EBBHR, r8
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
	ld	r8, VCPU_WORT(r4)
	mtspr	SPRN_EBBRR, r5
	mtspr	SPRN_BESCR, r6
	mtspr	SPRN_PID, r7
	mtspr	SPRN_WORT, r8
BEGIN_FTR_SECTION
	PPC_INVALIDATE_ERAT
END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
BEGIN_FTR_SECTION
	/* POWER8-only registers */
	ld	r5, VCPU_TCSCR(r4)
	ld	r6, VCPU_ACOP(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r8, VCPU_TACR(r4)
	mtspr	SPRN_TCSCR, r5
	mtspr	SPRN_ACOP, r6
	mtspr	SPRN_CSIGR, r7
	mtspr	SPRN_TACR, r8
	nop
FTR_SECTION_ELSE
	/* POWER9-only registers */
	ld	r5, VCPU_TID(r4)
	ld	r6, VCPU_PSSCR(r4)
	lbz	r8, HSTATE_FAKE_SUSPEND(r13)
	oris	r6, r6, PSSCR_EC@h	/* This makes stop trap to HV */
	rldimi	r6, r8, PSSCR_FAKE_SUSPEND_LG, 63 - PSSCR_FAKE_SUSPEND_LG
	ld	r7, VCPU_HFSCR(r4)
	mtspr	SPRN_TIDR, r5
	mtspr	SPRN_PSSCR, r6
	mtspr	SPRN_HFSCR, r7
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
8:

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET(r5)
	add	r8,r8,r6
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3
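	/*
	 * In effect DEC = (dec_expires + tb_offset) - current_tb, i.e. the
	 * time remaining until the saved expiry, measured in guest
	 * timebase ticks.
	 */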

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Secondary threads wait for primary to have done partition switch */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, HSTATE_PTID(r13)
	cmpwi	r6, 0
	beq	21f
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	bne	21f
	HMT_LOW
20:	lwz	r3, VCORE_ENTRY_EXIT(r5)
	cmpwi	r3, 0x100
	bge	no_switch_exit
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	20b
	HMT_MEDIUM
21:
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync

	/* Check if HDEC expires soon */
	mfspr	r3, SPRN_HDEC
	EXTEND_HDEC(r3)
	cmpdi	r3, 512		/* 1 microsecond */
	blt	hdec_soon

	/* For hash guest, clear out and reload the SLB */
	ld	r6, VCPU_KVM(r4)
	lbz	r0, KVM_RADIX(r6)
	cmpwi	r0, 0
	bne	9f
	li	r6, 0
	slbmte	r6, r6
	slbia
	ptesync

	/* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
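	/*
	 * Each iteration above loads one saved SLB entry (ESID and index
	 * in r8, VSID and flags in r9) back into the hardware SLB with
	 * slbmte; slb_max is 0 for radix guests, so the loop is skipped.
	 */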
9:

#ifdef CONFIG_KVM_XICS
	/* We are entering the guest on that thread, push VCPU to XIVE */
	ld	r10, HSTATE_XIVE_TIMA_PHYS(r13)
	cmpldi	cr0, r10, 0
	beq	no_xive
	ld	r11, VCPU_XIVE_SAVED_STATE(r4)
	li	r9, TM_QW1_OS
	eieio
	stdcix	r11,r9,r10
	lwz	r11, VCPU_XIVE_CAM_WORD(r4)
	li	r9, TM_QW1_OS + TM_WORD2
	stwcix	r11,r9,r10
	li	r9, 1
	stb	r9, VCPU_XIVE_PUSHED(r4)
	eieio

	/*
	 * We clear the irq_pending flag. There is a small chance of a
	 * race vs. the escalation interrupt happening on another
	 * processor setting it again, but the only consequence is to
	 * cause a spurious wakeup on the next H_CEDE, which is not an
	 * issue.
	 */
	li	r0,0
	stb	r0, VCPU_IRQ_PENDING(r4)

	/*
	 * In single escalation mode, if the escalation interrupt is
	 * on, we mask it.
	 */
	lbz	r0, VCPU_XIVE_ESC_ON(r4)
	cmpwi	r0,0
	beq	1f
	ld	r10, VCPU_XIVE_ESC_RADDR(r4)
	li	r9, XIVE_ESB_SET_PQ_01
	ldcix	r0, r10, r9
	sync

	/* We have a possible subtle race here: The escalation interrupt might
	 * have fired and be on its way to the host queue while we mask it,
	 * and if we unmask it early enough (re-cede right away), there is
	 * a theoretical possibility that it fires again, thus landing in the
	 * target queue more than once which is a big no-no.
	 *
	 * Fortunately, solving this is rather easy. If the above load setting
	 * PQ to 01 returns a previous value where P is set, then we know the
	 * escalation interrupt is somewhere on its way to the host. In that
	 * case we simply don't clear the xive_esc_on flag below. It will be
	 * eventually cleared by the handler for the escalation interrupt.
	 *
	 * Then, when doing a cede, we check that flag again before re-enabling
	 * the escalation interrupt, and if set, we abort the cede.
	 */
	andi.	r0, r0, XIVE_ESB_VAL_P
	bne-	1f

	/* Now P is 0, we can clear the flag */
	li	r0, 0
	stb	r0, VCPU_XIVE_ESC_ON(r4)
1:
no_xive:
#endif /* CONFIG_KVM_XICS */

deliver_guest_interrupt:
	ld	r6, VCPU_CTR(r4)
	ld	r7, VCPU_XER(r4)

	mtctr	r6
	mtxer	r7

kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME
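	/*
	 * The rldicl/rotldi pair above clears just the MSR_HV bit from the
	 * guest MSR image, and MSR_ME is then forced on so the guest is
	 * always entered with machine check interrupts enabled.
	 */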

	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	cmpdi	cr1, r0, 0
	andi.	r8, r11, MSR_EE
	mfspr	r8, SPRN_LPCR
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
	mtspr	SPRN_LPCR, r8
	isync
	beq	5f
	li	r0, BOOK3S_INTERRUPT_EXTERNAL
	bne	cr1, 12f
	mfspr	r0, SPRN_DEC
BEGIN_FTR_SECTION
	/* On POWER9 check whether the guest has large decrementer enabled */
	andis.	r8, r8, LPCR_LD@h
	bne	15f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	extsw	r0, r0
15:	cmpdi	r0, 0
	li	r0, BOOK3S_INTERRUPT_DECREMENTER
	bge	5f

12:	mtspr	SPRN_SRR0, r10
	mr	r10,r0
	mtspr	SPRN_SRR1, r11
	mr	r9, r4
	bl	kvmppc_msr_interrupt
5:
BEGIN_FTR_SECTION
	b	fast_guest_return
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	/* On POWER9, check for pending doorbell requests */
	lbz	r0, VCPU_DBELL_REQ(r4)
	cmpwi	r0, 0
	beq	fast_guest_return
	ld	r5, HSTATE_KVM_VCORE(r13)
	/* Set DPDES register so the CPU will take a doorbell interrupt */
	li	r0, 1
	mtspr	SPRN_DPDES, r0
	std	r0, VCORE_DPDES(r5)
	/* Make sure other cpus see vcore->dpdes set before dbell req clear */
	lwsync
	/* Clear the pending doorbell request */
	li	r0, 0
	stb	r0, VCPU_DBELL_REQ(r4)

/*
 * Required state:
 * R4 = vcpu
 * R10: value for HSRR0
 * R11: value for HSRR1
 * R13 = PACA
 */
fast_guest_return:
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Accumulate timing */
	addi	r3, r4, VCPU_TB_GUEST
	bl	kvmhv_accumulate_time
#endif

	/* Enter guest */

BEGIN_FTR_SECTION
	ld	r5, VCPU_CFAR(r4)
	mtspr	SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r0, VCPU_PPR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)

BEGIN_FTR_SECTION
	mtspr	SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

/* Move canary into DSISR to check for later */
BEGIN_FTR_SECTION
	li	r0, 0x7fff
	mtspr	SPRN_HDSISR, r0
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)
	HRFI_TO_GUEST
	b	.

secondary_too_late:
	li	r12, 0
	cmpdi	r4, 0
	beq	11f
	stw	r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
11:	b	kvmhv_switch_to_host

no_switch_exit:
	HMT_MEDIUM
	li	r12, 0
	b	12f
hdec_soon:
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
12:	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
	b	guest_bypass

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R12		= (guest CR << 32) | interrupt vector
	 * R13		= PACA
	 * guest R12 saved in shadow VCPU SCRATCH0
	 * guest CTR saved in shadow VCPU SCRATCH1 if RELOCATABLE
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std	r9, HSTATE_SCRATCH2(r13)
	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr
#endif
	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	std	r3, VCPU_GPR(R12)(r9)
	/* CR is in the high half of r12 */
	srdi	r4, r12, 32
	stw	r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	/* trap is in the low half of r12, clear CR from the high half */
	clrldi	r12, r12, 32
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(R13)(r9)
	std	r4, VCPU_LR(r9)

	stw	r12,VCPU_TRAP(r9)

	/*
	 * Now that we have saved away SRR0/1 and HSRR0/1,
	 * interrupts are recoverable in principle, so set MSR_RI.
	 * This becomes important for relocation-on interrupts from
	 * the guest, which we can get in radix mode on POWER9.
	 */
	li	r0, MSR_RI
	mtmsrd	r0, 1

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMINTR
	mr	r4, r9
	bl	kvmhv_accumulate_time
	ld	r5, VCPU_GPR(R5)(r9)
	ld	r6, VCPU_GPR(R6)(r9)
	ld	r7, VCPU_GPR(R7)(r9)
	ld	r8, VCPU_GPR(R8)(r9)
#endif

	/* Save HEIR (HV emulation assist reg) in emul_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
	stw	r3,VCPU_LAST_INST(r9)
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
11:	stw	r3,VCPU_HEIR(r9)

	/* these are volatile across C function calls */
#ifdef CONFIG_RELOCATABLE
	ld	r3, HSTATE_SCRATCH1(r13)
	mtctr	r3
#else
	mfctr	r3
#endif
	mfxer	r4
	std	r3, VCPU_CTR(r9)
	std	r4, VCPU_XER(r9)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* For softpatch interrupt, go off and do TM instruction emulation */
	cmpwi	r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
	beq	kvmppc_tm_emul
#endif

	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	kvmppc_hdsi
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq	kvmppc_hisi

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	EXTEND_HDEC(r3)
	cmpdi	r3,0
	mr	r4,r9
	bge	fast_guest_return
2:
	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Hypervisor doorbell - exit only if host IPI flag set */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
	bne	3f
BEGIN_FTR_SECTION
	PPC_MSGSYNC
	lwsync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	beq	4f
	b	guest_exit_cont
3:
	/* If it's a hypervisor facility unavailable interrupt, save HFSCR */
	cmpwi	r12, BOOK3S_INTERRUPT_H_FAC_UNAVAIL
	bne	14f
	mfspr	r3, SPRN_HFSCR
	std	r3, VCPU_HFSCR(r9)
	b	guest_exit_cont
14:
	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+	guest_exit_cont

	/* External interrupt, first check for host_ipi. If this is
	 * set, we know the host wants us out so let's do it now
	 */
	bl	kvmppc_read_intr

	/*
	 * Restore the active volatile registers after returning from
	 * a C function.
	 */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_EXTERNAL

	/*
	 * kvmppc_read_intr return codes:
	 *
	 * Exit to host (r3 > 0)
	 *   1 An interrupt is pending that needs to be handled by the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *
	 *   2 Passthrough that needs completion in the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *     However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
	 *     to indicate to the host to complete handling the interrupt
	 *
	 * Before returning to guest, we check if any CPU is heading out
	 * to the host and if so, we head out also. If no CPUs are heading
	 * out, check the return values <= 0 listed below.
	 *
	 * Return to guest (r3 <= 0)
	 *  0 No external interrupt is pending
	 * -1 A guest wakeup IPI (which has now been cleared)
	 *    In either case, we return to guest to deliver any pending
	 *    guest interrupts.
	 *
	 * -2 A PCI passthrough external interrupt was handled
	 *    (interrupt was delivered directly to guest)
	 *    Return to guest to deliver any pending guest interrupts.
	 */

	cmpdi	r3, 1
	ble	1f

	/* Return code = 2 */
	li	r12, BOOK3S_INTERRUPT_HV_RM_HARD
	stw	r12, VCPU_TRAP(r9)
	b	guest_exit_cont

1:	/* Return code <= 1 */
	cmpdi	r3, 0
	bgt	guest_exit_cont

	/* Return code <= 0 */
4:	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	mr	r4, r9
	blt	deliver_guest_interrupt

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save more register state  */
	mfdar	r6
	mfdsisr	r7
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	mc_cont
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
mc_cont:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMEXIT
	mr	r4, r9
	bl	kvmhv_accumulate_time
#endif
#ifdef CONFIG_KVM_XICS
	/* We are exiting, pull the VP from the XIVE */
	lbz	r0, VCPU_XIVE_PUSHED(r9)
	cmpwi	cr0, r0, 0
	beq	1f
	li	r7, TM_SPC_PULL_OS_CTX
	li	r6, TM_QW1_OS
	mfmsr	r0
	andi.	r0, r0, MSR_DR		/* in real mode? */
	beq	2f
	ld	r10, HSTATE_XIVE_TIMA_VIRT(r13)
	cmpldi	cr0, r10, 0
	beq	1f
	/* First load to pull the context, we ignore the value */
	eieio
	lwzx	r11, r7, r10
	/* Second load to recover the context state (Words 0 and 1) */
	ldx	r11, r6, r10
	b	3f
2:	ld	r10, HSTATE_XIVE_TIMA_PHYS(r13)
	cmpldi	cr0, r10, 0
	beq	1f
	/* First load to pull the context, we ignore the value */
	eieio
	lwzcix	r11, r7, r10
	/* Second load to recover the context state (Words 0 and 1) */
	ldcix	r11, r6, r10
3:	std	r11, VCPU_XIVE_SAVED_STATE(r9)
	/* Fixup some of the state for the next load */
	li	r10, 0
	li	r0, 0xff
	stb	r10, VCPU_XIVE_PUSHED(r9)
	stb	r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
	stb	r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
	eieio
1:
#endif /* CONFIG_KVM_XICS */

	/* For hash guest, read the guest SLB and save it away */
	ld	r5, VCPU_KVM(r9)
	lbz	r0, KVM_RADIX(r5)
	li	r5, 0
	cmpwi	r0, 0
	bne	3f			/* for radix, save 0 entries */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	/* Finally clear out the SLB */
	li	r0,0
	slbmte	r0,r0
	slbia
	ptesync
3:	stw	r5,VCPU_SLB_MAX(r9)

guest_bypass:
	mr 	r3, r12
	/* Increment exit count, poke other threads to exit */
	bl	kvmhv_commence_exit
	nop
	ld	r9, HSTATE_KVM_VCPU(r13)
	lwz	r12, VCPU_TRAP(r9)

	/* Stop others sending VCPU interrupts to this physical CPU */
	li	r0, -1
	stw	r0, VCPU_CPU(r9)
	stw	r0, VCPU_THREAD_CPU(r9)

	/* Save guest CTRL register, set runlatch to 1 */
	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/*
	 * Save the guest PURR/SPURR
	 */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4

	/* Save DEC */
	ld	r3, HSTATE_KVM_VCORE(r13)
	mfspr	r5,SPRN_DEC
	mftb	r6
	/* On P9, if the guest has large decr enabled, don't sign extend */
BEGIN_FTR_SECTION
	ld	r4, VCORE_LPCR(r3)
	andis.	r4, r4, LPCR_LD@h
	bne	16f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	extsw	r5,r5
16:	add	r5,r5,r6
	/* r5 is a guest timebase value here, convert to host TB */
	ld	r4,VCORE_TB_OFFSET(r3)
	subf	r5,r4,r5
	std	r5,VCPU_DEC_EXPIRES(r9)
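	/*
	 * This is the inverse of the conversion done at guest entry: the
	 * expiry time (DEC value plus current guest timebase) is converted
	 * back to a host timebase value by subtracting vcore->tb_offset
	 * before being saved.
	 */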

BEGIN_FTR_SECTION
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Save POWER8-specific registers */
	mfspr	r5, SPRN_IAMR
	mfspr	r6, SPRN_PSPB
	mfspr	r7, SPRN_FSCR
	std	r5, VCPU_IAMR(r9)
	stw	r6, VCPU_PSPB(r9)
	std	r7, VCPU_FSCR(r9)
	mfspr	r5, SPRN_IC
	mfspr	r7, SPRN_TAR
	std	r5, VCPU_IC(r9)
	std	r7, VCPU_TAR(r9)
	mfspr	r8, SPRN_EBBHR
	std	r8, VCPU_EBBHR(r9)
	mfspr	r5, SPRN_EBBRR
	mfspr	r6, SPRN_BESCR
	mfspr	r7, SPRN_PID
	mfspr	r8, SPRN_WORT
	std	r5, VCPU_EBBRR(r9)
	std	r6, VCPU_BESCR(r9)
	stw	r7, VCPU_GUEST_PID(r9)
	std	r8, VCPU_WORT(r9)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_TCSCR
	mfspr	r6, SPRN_ACOP
	mfspr	r7, SPRN_CSIGR
	mfspr	r8, SPRN_TACR
	std	r5, VCPU_TCSCR(r9)
	std	r6, VCPU_ACOP(r9)
	std	r7, VCPU_CSIGR(r9)
	std	r8, VCPU_TACR(r9)
FTR_SECTION_ELSE
	mfspr	r5, SPRN_TIDR
	mfspr	r6, SPRN_PSSCR
	std	r5, VCPU_TID(r9)
	rldicl	r6, r6, 4, 50		/* r6 &= PSSCR_GUEST_VIS */
	rotldi	r6, r6, 60
	std	r6, VCPU_PSSCR(r9)
	/* Restore host HFSCR value */
	ld	r7, STACK_SLOT_HFSCR(r1)
	mtspr	SPRN_HFSCR, r7
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
	/*
	 * Restore various registers to 0, where non-zero values
	 * set by the guest could disrupt the host.
	 */
	li	r0, 0
	mtspr	SPRN_PSPB, r0
	mtspr	SPRN_WORT, r0
BEGIN_FTR_SECTION
	mtspr	SPRN_IAMR, r0
	mtspr	SPRN_TCSCR, r0
	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
	li	r0, 1
	sldi	r0, r0, 31
	mtspr	SPRN_MMCRS, r0
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
8:

	/* Save and reset AMR and UAMOR before turning on the MMU */
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	li	r6,0
	mtspr	SPRN_AMR,r6
	mtspr	SPRN_UAMOR, r6

	/* Switch DSCR back to host value */
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
	mtspr	SPRN_DSCR, r7

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* save FP state */
	mr	r3, r9
	bl	kvmppc_save_fp

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Branch around the call if both CPU_FTR_TM and
 * CPU_FTR_P9_TM_HV_ASSIST are off.
 */
BEGIN_FTR_SECTION
	b	91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
	 */
	bl	kvmppc_save_tm
91:
#endif

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
	beq	25f
	li	r4, LPPACA_YIELDCOUNT
	LWZX_BE	r3, r8, r4
	addi	r3, r3, 1
	STWX_BE	r3, r8, r4
	li	r3, 1
	stb	r3, VCPU_VPA_DIRTY(r9)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
BEGIN_FTR_SECTION
	/*
	 * POWER8 seems to have a hardware bug where setting
	 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
	 * when some counters are already negative doesn't seem
	 * to cause a performance monitor alert (and hence interrupt).
	 * The effect of this is that when saving the PMU state,
	 * if there is no PMU alert pending when we read MMCR0
	 * before freezing the counters, but one becomes pending
	 * before we read the counters, we lose it.
	 * To work around this, we need a way to freeze the counters
	 * before reading MMCR0.  Normally, freezing the counters
	 * is done by writing MMCR0 (to set MMCR0[FC]) which
	 * unavoidably writes MMCR0[PMAO] as well.  On POWER8,
	 * we can also freeze the counters using MMCR2, by writing
	 * 1s to all the counter freeze condition bits (there are
	 * 9 bits each for 6 counters).
	 */
	li	r3, -1			/* set all freeze bits */
	clrrdi	r3, r3, 10
	mfspr	r10, SPRN_MMCR2
	mtspr	SPRN_MMCR2, r3
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
	/* Clear MMCRA in order to disable SDAR updates */
	li	r7, 0
	mtspr	SPRN_MMCRA, r7
	isync
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	mfspr	r7, SPRN_SIAR
	mfspr	r8, SPRN_SDAR
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
BEGIN_FTR_SECTION
	std	r10, VCPU_MMCR + 24(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_SIER
	std	r5, VCPU_SIER(r9)
BEGIN_FTR_SECTION_NESTED(96)
	mfspr	r6, SPRN_SPMC1
	mfspr	r7, SPRN_SPMC2
	mfspr	r8, SPRN_MMCRS
	stw	r6, VCPU_PMC + 24(r9)
	stw	r7, VCPU_PMC + 28(r9)
	std	r8, VCPU_MMCR + 32(r9)
	lis	r4, 0x8000
	mtspr	SPRN_MMCRS, r4
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
22:

	/* Restore host values of some registers */
BEGIN_FTR_SECTION
	ld	r5, STACK_SLOT_CIABR(r1)
	ld	r6, STACK_SLOT_DAWR(r1)
	ld	r7, STACK_SLOT_DAWRX(r1)
	mtspr	SPRN_CIABR, r5
	/*
	 * If the DAWR doesn't work, it's ok to write these here as
	 * this value should always be zero
	*/
	mtspr	SPRN_DAWR, r6
	mtspr	SPRN_DAWRX, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
BEGIN_FTR_SECTION
	ld	r5, STACK_SLOT_TID(r1)
	ld	r6, STACK_SLOT_PSSCR(r1)
	ld	r7, STACK_SLOT_PID(r1)
	ld	r8, STACK_SLOT_IAMR(r1)
	mtspr	SPRN_TIDR, r5
	mtspr	SPRN_PSSCR, r6
	mtspr	SPRN_PID, r7
	mtspr	SPRN_IAMR, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

#ifdef CONFIG_PPC_RADIX_MMU
	/*
	 * Are we running hash or radix ?
	 */
	ld	r5, VCPU_KVM(r9)
	lbz	r0, KVM_RADIX(r5)
	cmpwi	cr2, r0, 0
	beq	cr2, 4f

	/* Radix: Handle the case where the guest used an illegal PID */
	LOAD_REG_ADDR(r4, mmu_base_pid)
	lwz	r3, VCPU_GUEST_PID(r9)
	lwz	r5, 0(r4)
	cmpw	cr0,r3,r5
	blt	2f

	/*
	 * Illegal PID, the HW might have prefetched and cached in the TLB
	 * some translations for the  LPID 0 / guest PID combination which
	 * Linux doesn't know about, so we need to flush that PID out of
	 * the TLB. First we need to set LPIDR to 0 so tlbiel applies to
	 * the right context.
	*/
	li	r0,0
	mtspr	SPRN_LPID,r0
	isync

	/* Then do a congruence class local flush */
	ld	r6,VCPU_KVM(r9)
	lwz	r0,KVM_TLB_SETS(r6)
	mtctr	r0
	li	r7,0x400		/* IS field = 0b01 */
	ptesync
	sldi	r0,r3,32		/* RS has PID */
1:	PPC_TLBIEL(7,0,2,1,1)		/* RIC=2, PRS=1, R=1 */
	addi	r7,r7,0x1000
	bdnz	1b
	ptesync

2:	/* Flush the ERAT on radix P9 DD1 guest exit */
BEGIN_FTR_SECTION
	PPC_INVALIDATE_ERAT
END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
4:
#endif /* CONFIG_PPC_RADIX_MMU */

	/*
	 * POWER7/POWER8 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
kvmhv_switch_to_host:
	/* Secondary threads wait for primary to do partition switch */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r3,HSTATE_PTID(r13)
	cmpwi	r3,0
	beq	15f
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	b	16f

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	rlwinm	r0,r3,32-8,0xff
	clrldi	r3,r3,56
	cmpw	r3,r0
	bne	15b
	isync

	/* Did we actually switch to the guest at all? */
	lbz	r6, VCORE_IN_GUEST(r5)
	cmpwi	r6, 0
	beq	19f

	/* Primary thread switches back to host partition */
	lwz	r7,KVM_HOST_LPID(r4)
BEGIN_FTR_SECTION
	ld	r6,KVM_HOST_SDR1(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to host page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPID,r7
	isync

BEGIN_FTR_SECTION
	/* DPDES and VTB are shared between threads */
	mfspr	r7, SPRN_DPDES
	mfspr	r8, SPRN_VTB
	std	r7, VCORE_DPDES(r5)
	std	r8, VCORE_VTB(r5)
	/* clear DPDES so we don't get guest doorbells in the host */
	li	r8, 0
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* If HMI, call kvmppc_realmode_hmi_handler() */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	27f
	bl	kvmppc_realmode_hmi_handler
	nop
	cmpdi	r3, 0
	li	r12, BOOK3S_INTERRUPT_HMI
	/*
	 * At this point kvmppc_realmode_hmi_handler may have resynced
	 * the TB, and if it has, we must not subtract the guest timebase
	 * offset from the timebase. So, skip it.
	 *
	 * Also, do not call kvmppc_subcore_exit_guest() because it has
	 * been invoked as part of kvmppc_realmode_hmi_handler().
	 */
	beq	30f

27:
	/* Subtract timebase offset from timebase */
	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	17f
	mftb	r6			/* current guest timebase */
	subf	r8,r8,r6
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	17f
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

17:	bl	kvmppc_subcore_exit_guest
	nop
30:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */

	/* Reset PCR */
	ld	r0, VCORE_PCR(r5)
	cmpdi	r0, 0
	beq	18f
	li	r0, 0
	mtspr	SPRN_PCR, r0
18:
	/* Signal secondary CPUs to continue */
	stb	r0,VCORE_IN_GUEST(r5)
19:	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

16:
BEGIN_FTR_SECTION
	/* On POWER9 with HPT-on-radix we need to wait for all other threads */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	47f
	lwz	r8, KVM_SPLIT_DO_RESTORE(r3)
	cmpwi	r8, 0
	beq	47f
	stw	r12, STACK_SLOT_TRAP(r1)
	bl	kvmhv_p9_restore_lpcr
	nop
	lwz	r12, STACK_SLOT_TRAP(r1)
	b	48f
47:
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync
48:
	/* load host SLB entries */
BEGIN_MMU_FTR_SECTION
	b	0f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr
0:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Finish timing, if we have a vcpu */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	li	r3, 0
	beq	2f
	bl	kvmhv_accumulate_time
2:
#endif
	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	ld	r0, SFS+PPC_LR_STKOFF(r1)
	addi	r1, r1, SFS
	mtlr	r0
	blr

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Softpatch interrupt for transactional memory emulation cases
 * on POWER9 DD2.2.  This is early in the guest exit path - we
 * haven't saved registers or done a treclaim yet.
 */
kvmppc_tm_emul:
	/* Save instruction image in HEIR */
	mfspr	r3, SPRN_HEIR
	stw	r3, VCPU_HEIR(r9)

	/*
	 * The cases we want to handle here are those where the guest
	 * is in real suspend mode and is trying to transition to
	 * transactional mode.
	 */
	lbz	r0, HSTATE_FAKE_SUSPEND(r13)
	cmpwi	r0, 0		/* keep exiting guest if in fake suspend */
	bne	guest_exit_cont
	rldicl	r3, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r3, 1		/* or if not in suspend state */
	bne	guest_exit_cont

	/* Call C code to do the emulation */
	mr	r3, r9
	bl	kvmhv_p9_tm_emulation_early
	nop
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
	cmpwi	r3, 0
	beq	guest_exit_cont		/* continue exiting if not handled */
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	b	fast_interrupt_c_return	/* go back to guest if handled */
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
kvmppc_hdsi:
	ld	r3, VCPU_KVM(r9)
	lbz	r0, KVM_RADIX(r3)
	mfspr	r4, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
BEGIN_FTR_SECTION
	/* Look for DSISR canary. If we find it, retry instruction */
	cmpdi	r6, 0x7fff
	beq	6f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	cmpwi	r0, 0
	bne	.Lradix_hdsi		/* on radix, just save DAR/DSISR/ASDR */
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	beq	3f
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_ASDR		/* on POWER9, use ASDR to get VSID */
	b	4f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	clrrdi	r0, r4, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_DATA_SEGMENT
	bne	7f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	6f
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont
	cmpdi	r3, -2			/* MMIO emulation; need instr word */
	beq	2f

	/* Synthesize a DSI (or DSegI) for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
	mr	r6, r3
1:	li	r0, BOOK3S_INTERRUPT_DATA_STORAGE
	mtspr	SPRN_DSISR, r6
7:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	mr	r10, r0
	bl	kvmppc_msr_interrupt
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
	ld	r8, VCPU_XER(r9)
	mtctr	r7
	mtxer	r8
	mr	r4, r9
	b	fast_guest_return

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)
	b	4b

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	mfmsr	r3
	ori	r4, r3, MSR_DR		/* Enable paging for data */
	mtmsrd	r4
	lwz	r8, 0(r10)
	mtmsrd	r3

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)
	b	guest_exit_cont

.Lradix_hdsi:
	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)
.Lradix_hisi:
	mfspr	r5, SPRN_ASDR
	std	r5, VCPU_FAULT_GPA(r9)
	b	guest_exit_cont

/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
kvmppc_hisi:
	ld	r3, VCPU_KVM(r9)
	lbz	r0, KVM_RADIX(r3)
	cmpwi	r0, 0
	bne	.Lradix_hisi		/* for radix, just save ASDR */
	andis.	r0, r11, SRR1_ISI_NOPT@h
	beq	1f
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	beq	3f
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_ASDR		/* on POWER9, use ASDR to get VSID */
	b	4f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	clrrdi	r0, r10, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_INST_SEGMENT
	bne	7f			/* if no SLB entry found */
4:
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	mr	r4, r10
	mr	r6, r11
	li	r7, 0			/* instruction fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont

	/* Synthesize an ISI (or ISegI) for the guest */
	mr	r11, r3
1:	li	r0, BOOK3S_INTERRUPT_INST_STORAGE
7:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	mr	r10, r0
	bl	kvmppc_msr_interrupt
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
	b	4b

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 *
 * r5 - r8 contain hcall args,
 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
 */
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	andi.	r0,r11,MSR_PR
	/* sc 1 from userspace - reflect to guest syscall */
	bne	sc_1_fast_return
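	/* hcall numbers are multiples of 4; each indexes a 4-byte entry in hcall_real_table */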
	clrrdi	r3,r3,2
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	bge	guest_exit_cont
	/* See if this hcall is enabled for in-kernel handling */
	ld	r4, VCPU_KVM(r9)
	srdi	r0, r3, 8	/* r0 = (r3 / 4) >> 6 */
	sldi	r0, r0, 3	/* index into kvm->arch.enabled_hcalls[] */
	add	r4, r4, r0
	ld	r0, KVM_ENABLED_HCALLS(r4)
	rlwinm	r4, r3, 32-2, 0x3f	/* r4 = (r3 / 4) & 0x3f */
	srd	r0, r0, r4
	andi.	r0, r0, 1
	beq	guest_exit_cont
	/* Get pointer to handler, if any, and call it */
	LOAD_REG_ADDR(r4, hcall_real_table)
	lwax	r3,r3,r4
	cmpwi	r3,0
	beq	guest_exit_cont
	add	r12,r3,r4
	mtctr	r12
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	bctrl
	cmpdi	r3,H_TOO_HARD
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)
	ld	r10,VCPU_PC(r4)
	ld	r11,VCPU_MSR(r4)
	b	fast_guest_return

sc_1_fast_return:
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	li	r10, BOOK3S_INTERRUPT_SYSCALL
	bl	kvmppc_msr_interrupt
	mr	r4,r9
	b	fast_guest_return

	/* We've attempted a real mode hcall, but it has been punted back
	 * to userspace.  We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)

	b	guest_exit_cont

	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	DOTSYM(kvmppc_h_remove) - hcall_real_table
	.long	DOTSYM(kvmppc_h_enter) - hcall_real_table
	.long	DOTSYM(kvmppc_h_read) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
	.long	DOTSYM(kvmppc_h_protect) - hcall_real_table
	.long	DOTSYM(kvmppc_h_get_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
	.long	0		/* 0x2c */
	.long	0		/* 0x30 */
	.long	0		/* 0x34 */
	.long	0		/* 0x38 */
	.long	0		/* 0x3c */
	.long	0		/* 0x40 */
	.long	0		/* 0x44 */
	.long	0		/* 0x48 */
	.long	0		/* 0x4c */
	.long	0		/* 0x50 */
	.long	0		/* 0x54 */
	.long	0		/* 0x58 */
	.long	0		/* 0x5c */
	.long	0		/* 0x60 */
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
#else
	.long	0		/* 0x64 - H_EOI */
	.long	0		/* 0x68 - H_CPPR */
	.long	0		/* 0x6c - H_IPI */
	.long	0		/* 0x70 - H_IPOLL */
	.long	0		/* 0x74 - H_XIRR */
#endif
	.long	0		/* 0x78 */
	.long	0		/* 0x7c */
	.long	0		/* 0x80 */
	.long	0		/* 0x84 */
	.long	0		/* 0x88 */
	.long	0		/* 0x8c */
	.long	0		/* 0x90 */
	.long	0		/* 0x94 */
	.long	0		/* 0x98 */
	.long	0		/* 0x9c */
	.long	0		/* 0xa0 */
	.long	0		/* 0xa4 */
	.long	0		/* 0xa8 */
	.long	0		/* 0xac */
	.long	0		/* 0xb0 */
	.long	0		/* 0xb4 */
	.long	0		/* 0xb8 */
	.long	0		/* 0xbc */
	.long	0		/* 0xc0 */
	.long	0		/* 0xc4 */
	.long	0		/* 0xc8 */
	.long	0		/* 0xcc */
	.long	0		/* 0xd0 */
	.long	0		/* 0xd4 */
	.long	0		/* 0xd8 */
	.long	0		/* 0xdc */
	.long	DOTSYM(kvmppc_h_cede) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
	.long	0		/* 0xe8 */
	.long	0		/* 0xec */
	.long	0		/* 0xf0 */
	.long	0		/* 0xf4 */
	.long	0		/* 0xf8 */
	.long	0		/* 0xfc */
	.long	0		/* 0x100 */
	.long	0		/* 0x104 */
	.long	0		/* 0x108 */
	.long	0		/* 0x10c */
	.long	0		/* 0x110 */
	.long	0		/* 0x114 */
	.long	0		/* 0x118 */
	.long	0		/* 0x11c */
	.long	0		/* 0x120 */
	.long	DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
	.long	0		/* 0x128 */
	.long	0		/* 0x12c */
	.long	0		/* 0x130 */
	.long	DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
	.long	0		/* 0x140 */
	.long	0		/* 0x144 */
	.long	0		/* 0x148 */
	.long	0		/* 0x14c */
	.long	0		/* 0x150 */
	.long	0		/* 0x154 */
	.long	0		/* 0x158 */
	.long	0		/* 0x15c */
	.long	0		/* 0x160 */
	.long	0		/* 0x164 */
	.long	0		/* 0x168 */
	.long	0		/* 0x16c */
	.long	0		/* 0x170 */
	.long	0		/* 0x174 */
	.long	0		/* 0x178 */
	.long	0		/* 0x17c */
	.long	0		/* 0x180 */
	.long	0		/* 0x184 */
	.long	0		/* 0x188 */
	.long	0		/* 0x18c */
	.long	0		/* 0x190 */
	.long	0		/* 0x194 */
	.long	0		/* 0x198 */
	.long	0		/* 0x19c */
	.long	0		/* 0x1a0 */
	.long	0		/* 0x1a4 */
	.long	0		/* 0x1a8 */
	.long	0		/* 0x1ac */
	.long	0		/* 0x1b0 */
	.long	0		/* 0x1b4 */
	.long	0		/* 0x1b8 */
	.long	0		/* 0x1bc */
	.long	0		/* 0x1c0 */
	.long	0		/* 0x1c4 */
	.long	0		/* 0x1c8 */
	.long	0		/* 0x1cc */
	.long	0		/* 0x1d0 */
	.long	0		/* 0x1d4 */
	.long	0		/* 0x1d8 */
	.long	0		/* 0x1dc */
	.long	0		/* 0x1e0 */
	.long	0		/* 0x1e4 */
	.long	0		/* 0x1e8 */
	.long	0		/* 0x1ec */
	.long	0		/* 0x1f0 */
	.long	0		/* 0x1f4 */
	.long	0		/* 0x1f8 */
	.long	0		/* 0x1fc */
	.long	0		/* 0x200 */
	.long	0		/* 0x204 */
	.long	0		/* 0x208 */
	.long	0		/* 0x20c */
	.long	0		/* 0x210 */
	.long	0		/* 0x214 */
	.long	0		/* 0x218 */
	.long	0		/* 0x21c */
	.long	0		/* 0x220 */
	.long	0		/* 0x224 */
	.long	0		/* 0x228 */
	.long	0		/* 0x22c */
	.long	0		/* 0x230 */
	.long	0		/* 0x234 */
	.long	0		/* 0x238 */
	.long	0		/* 0x23c */
	.long	0		/* 0x240 */
	.long	0		/* 0x244 */
	.long	0		/* 0x248 */
	.long	0		/* 0x24c */
	.long	0		/* 0x250 */
	.long	0		/* 0x254 */
	.long	0		/* 0x258 */
	.long	0		/* 0x25c */
	.long	0		/* 0x260 */
	.long	0		/* 0x264 */
	.long	0		/* 0x268 */
	.long	0		/* 0x26c */
	.long	0		/* 0x270 */
	.long	0		/* 0x274 */
	.long	0		/* 0x278 */
	.long	0		/* 0x27c */
	.long	0		/* 0x280 */
	.long	0		/* 0x284 */
	.long	0		/* 0x288 */
	.long	0		/* 0x28c */
	.long	0		/* 0x290 */
	.long	0		/* 0x294 */
	.long	0		/* 0x298 */
	.long	0		/* 0x29c */
	.long	0		/* 0x2a0 */
	.long	0		/* 0x2a4 */
	.long	0		/* 0x2a8 */
	.long	0		/* 0x2ac */
	.long	0		/* 0x2b0 */
	.long	0		/* 0x2b4 */
	.long	0		/* 0x2b8 */
	.long	0		/* 0x2bc */
	.long	0		/* 0x2c0 */
	.long	0		/* 0x2c4 */
	.long	0		/* 0x2c8 */
	.long	0		/* 0x2cc */
	.long	0		/* 0x2d0 */
	.long	0		/* 0x2d4 */
	.long	0		/* 0x2d8 */
	.long	0		/* 0x2dc */
	.long	0		/* 0x2e0 */
	.long	0		/* 0x2e4 */
	.long	0		/* 0x2e8 */
	.long	0		/* 0x2ec */
	.long	0		/* 0x2f0 */
	.long	0		/* 0x2f4 */
	.long	0		/* 0x2f8 */
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table
#else
	.long	0		/* 0x2fc - H_XIRR_X */
#endif
	.long	DOTSYM(kvmppc_h_random) - hcall_real_table
	.globl	hcall_real_table_end
hcall_real_table_end:

_GLOBAL(kvmppc_h_set_xdabr)
	andi.	r0, r5, DABRX_USER | DABRX_KERNEL
	beq	6f
	li	r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
	andc.	r0, r5, r0
	beq	3f
6:	li	r3, H_PARAMETER
	blr

_GLOBAL(kvmppc_h_set_dabr)
	li	r5, DABRX_USER | DABRX_KERNEL
3:
BEGIN_FTR_SECTION
	b	2f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r4,VCPU_DABR(r3)
	stw	r5, VCPU_DABRX(r3)
	mtspr	SPRN_DABRX, r5
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4
	mfspr	r5, SPRN_DABR
	cmpd	r4, r5
	bne	1b
	isync
	li	r3,0
	blr

2:
BEGIN_FTR_SECTION
	/* POWER9 with disabled DAWR */
	li	r3, H_UNSUPPORTED
	blr
END_FTR_SECTION_IFCLR(CPU_FTR_DAWR)
	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
	rlwimi	r5, r4, 2, DAWRX_WT
	clrrdi	r4, r4, 3
	std	r4, VCPU_DAWR(r3)
	std	r5, VCPU_DAWRX(r3)
	mtspr	SPRN_DAWR, r4
	mtspr	SPRN_DAWRX, r5
	li	r3, 0
	blr

_GLOBAL(kvmppc_h_cede)		/* r3 = vcpu pointer, r11 = msr, r13 = paca */
	ori	r11,r11,MSR_EE
	std	r11,VCPU_MSR(r3)
	li	r0,1
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	cmpwi	r5,0
	bne	kvm_cede_prodded
	li	r12,0		/* set trap to 0 to say hcall is handled */
	stw	r12,VCPU_TRAP(r3)
	li	r0,H_SUCCESS
	std	r0,VCPU_GPR(R3)(r3)

	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r6,HSTATE_PTID(r13)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	clrldi	r8,r8,56
	li	r0,1
	sld	r0,r0,r6
	addi	r6,r5,VCORE_NAPPING_THREADS
31:	lwarx	r4,0,r6
	or	r4,r4,r0
	cmpw	r4,r8
	beq	kvm_cede_exit
	stwcx.	r4,0,r6
	bne	31b
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r0,NAPPING_CEDE
	stb	r0,HSTATE_NAPPING(r13)
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	cmpwi	r7,0x100
	bge	33f		/* another thread already exiting */

/*
 * Although not specifically required by the architecture, POWER7
 * preserves the following registers in nap mode, even if an SMT mode
 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
 */
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)

	/* save FP state */
	bl	kvmppc_save_fp

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Branch around the call if both CPU_FTR_TM and
 * CPU_FTR_P9_TM_HV_ASSIST are off.
 */
BEGIN_FTR_SECTION
	b	91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
	 */
	ld	r9, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_save_tm
91:
#endif

	/*
	 * Set DEC to the smaller of DEC and HDEC, so that we wake
	 * no later than the end of our timeslice (HDEC interrupts
	 * don't wake us from nap).
	 */
	mfspr	r3, SPRN_DEC
	mfspr	r4, SPRN_HDEC
	mftb	r5
BEGIN_FTR_SECTION
	/* On P9 check whether the guest has large decrementer mode enabled */
	ld	r6, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_LPCR(r6)
	andis.	r6, r6, LPCR_LD@h
	bne	68f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	extsw	r3, r3
68:	EXTEND_HDEC(r4)
	cmpd	r3, r4
	ble	67f
	mtspr	SPRN_DEC, r4
67:
	/* save expiry time of guest decrementer */
	add	r3, r3, r5
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET(r5)
	subf	r3, r6, r3	/* convert to host TB value */
	std	r3, VCPU_DEC_EXPIRES(r4)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	addi	r3, r4, VCPU_TB_CEDE
	bl	kvmhv_accumulate_time
#endif

	lis	r3, LPCR_PECEDP@h	/* Do wake on privileged doorbell */

	/*
	 * Take a nap until a decrementer, external or doorbell interrupt
	 * occurs, with PECE1 and PECE0 set in LPCR.
	 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
	 * Also clear the runlatch bit before napping.
	 */
kvm_do_nap:
	mfspr	r0, SPRN_CTRLF
	clrrdi	r0, r0, 1
	mtspr	SPRN_CTRLT, r0

	li	r0,1
	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	mfspr	r5,SPRN_LPCR
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
BEGIN_FTR_SECTION
	ori	r5, r5, LPCR_PECEDH
	rlwimi	r5, r3, 0, LPCR_PECEDP
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

kvm_nap_sequence:		/* desired LPCR value in r5 */
BEGIN_FTR_SECTION
	/*
	 * PSSCR bits:	exit criterion = 1 (wakeup based on LPCR at sreset)
	 *		enable state loss = 1 (allow SMT mode switch)
	 *		requested level = 0 (just stop dispatching)
	 */
	lis	r3, (PSSCR_EC | PSSCR_ESL)@h
	mtspr	SPRN_PSSCR, r3
	/* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
	li	r4, LPCR_PECE_HVEE@higher
	sldi	r4, r4, 32
	or	r5, r5, r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPCR,r5
	isync
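	/* drain outstanding stores before nap/stop: dummy store, ptesync, reload, then spin on a never-taken branch */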
	li	r0, 0
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
BEGIN_FTR_SECTION
	nap
FTR_SECTION_ELSE
	PPC_STOP
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
	b	.

33:	mr	r4, r3
	li	r3, 0
	li	r12, 0
	b	34f

kvm_end_cede:
	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMINTR
	bl	kvmhv_accumulate_time
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Branch around the call if both CPU_FTR_TM and
 * CPU_FTR_P9_TM_HV_ASSIST are off.
 */
BEGIN_FTR_SECTION
	b	91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
	 */
	bl	kvmppc_restore_tm
91:
#endif

	/* load up FP state */
	bl	kvmppc_load_fp

	/* Restore guest decrementer */
	ld	r3, VCPU_DEC_EXPIRES(r4)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET(r5)
	add	r3, r3, r6	/* convert host TB to guest TB value */
	mftb	r7
	subf	r3, r7, r3
	mtspr	SPRN_DEC, r3

	/* Load NV GPRS */
	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called a
	 * C routine in kvmppc_check_wake_reason
	 *	r4 = VCPU
	 * r3 tells us whether we need to return to host or not
	 * WARNING: it gets checked further down;
	 * do not modify r3 until this check is done.
	 */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* clear our bit in vcore->napping_threads */
34:	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r7,HSTATE_PTID(r13)
	li	r0,1
	sld	r0,r0,r7
	addi	r6,r5,VCORE_NAPPING_THREADS
32:	lwarx	r7,0,r6
	andc	r7,r7,r0
	stwcx.	r7,0,r6
	bne	32b
	li	r0,0
	stb	r0,HSTATE_NAPPING(r13)

	/* See if the wake reason saved in r3 means we need to exit */
	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
	cmpdi	r3, 0
	bgt	guest_exit_cont

	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)
	cmpwi	r0,0x100
	bge	guest_exit_cont

	b	kvmppc_cede_reentry	/* if not go back to guest */

	/* cede when already previously prodded case */
kvm_cede_prodded:
	li	r0,0
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)
	li	r3,H_SUCCESS
	blr

	/* we've ceded but we want to give control to the host */
kvm_cede_exit:
	ld	r9, HSTATE_KVM_VCPU(r13)
#ifdef CONFIG_KVM_XICS
	/* Abort if we still have a pending escalation */
	lbz	r5, VCPU_XIVE_ESC_ON(r9)
	cmpwi	r5, 0
	beq	1f
	li	r0, 0
	stb	r0, VCPU_CEDED(r9)
1:	/* Enable XIVE escalation */
	li	r5, XIVE_ESB_SET_PQ_00
	mfmsr	r0
	andi.	r0, r0, MSR_DR		/* in real mode? */
	beq	1f
	ld	r10, VCPU_XIVE_ESC_VADDR(r9)
	cmpdi	r10, 0
	beq	3f
	ldx	r0, r10, r5
	b	2f
1:	ld	r10, VCPU_XIVE_ESC_RADDR(r9)
	cmpdi	r10, 0
	beq	3f
	ldcix	r0, r10, r5
2:	sync
	li	r0, 1
	stb	r0, VCPU_XIVE_ESC_ON(r9)
#endif /* CONFIG_KVM_XICS */
3:	b	guest_exit_cont

	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	kvmppc_realmode_machine_check
	nop
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	/*
	 * For a guest that is FWNMI capable, deliver all MCE errors
	 * (handled or unhandled) by exiting the guest with the KVM_EXIT_NMI
	 * exit reason. This approach injects machine check errors into the
	 * guest address space with additional information in the form of an
	 * RTAS event, thus enabling the guest kernel to handle such errors
	 * suitably.
	 *
	 * For a guest that is not FWNMI capable (old QEMU), fall back to
	 * the old behaviour for backward compatibility:
	 * Deliver unhandled/fatal (e.g. UE) MCE errors to the guest
	 * through a machine check interrupt (set HSRR0 to 0x200).
	 * For handled (non-fatal) errors, just go back to guest execution
	 * with the current HSRR0.
	 * If we receive a machine check with MSR(RI=0), deliver it to the
	 * guest as a machine check, causing the guest to crash.
	 */
	ld	r11, VCPU_MSR(r9)
	rldicl.	r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */
	bne	mc_cont			/* if so, exit to host */
	/* Check if guest is capable of handling NMI exit */
	ld	r10, VCPU_KVM(r9)
	lbz	r10, KVM_FWNMI(r10)
	cmpdi	r10, 1			/* FWNMI capable? */
	beq	mc_cont			/* if so, exit with KVM_EXIT_NMI. */

	/* if not, fall through for backward compatibility. */
	andi.	r10, r11, MSR_RI	/* check for unrecoverable exception */
	beq	1f			/* Deliver a machine check to guest */
	ld	r10, VCPU_PC(r9)
	cmpdi	r3, 0		/* Did we handle MCE ? */
	bne	2f	/* Continue guest execution. */
	/* If not, deliver a machine check.  SRR0/1 are already set */
1:	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	bl	kvmppc_msr_interrupt
2:	b	fast_interrupt_c_return

/*
 * Check the reason we woke from nap, and take appropriate action.
 * Returns (in r3):
 *	0 if nothing needs to be done
 *	1 if something happened that needs to be handled by the host
 *	-1 if there was a guest wakeup (IPI or msgsnd)
 *	-2 if we handled a PCI passthrough interrupt (returned by
 *		kvmppc_read_intr only)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 * Modifies all volatile registers (since it may call a C function).
 * This routine calls kvmppc_read_intr, a C function, if an external
 * interrupt is pending.
 */
kvmppc_check_wake_reason:
	mfspr	r6, SPRN_SRR1
BEGIN_FTR_SECTION
	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
FTR_SECTION_ELSE
	rlwinm	r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 8			/* was it an external interrupt? */
	beq	7f			/* if so, see what it was */
	li	r3, 0
	li	r12, 0
	cmpwi	r6, 6			/* was it the decrementer? */
	beq	0f
BEGIN_FTR_SECTION
	cmpwi	r6, 5			/* privileged doorbell? */
	beq	0f
	cmpwi	r6, 3			/* hypervisor doorbell? */
	beq	3f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 0xa			/* Hypervisor maintenance ? */
	beq	4f
	li	r3, 1			/* anything else, return 1 */
0:	blr

	/* hypervisor doorbell */
3:	li	r12, BOOK3S_INTERRUPT_H_DOORBELL

	/*
	 * Clear the doorbell as we will invoke the handler
	 * explicitly in the guest exit path.
	 */
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
	/* see if it's a host IPI */
	li	r3, 1
BEGIN_FTR_SECTION
	PPC_MSGSYNC
	lwsync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bnelr
	/* if not, return -1 */
	li	r3, -1
	blr

	/* Woken up due to Hypervisor maintenance interrupt */
4:	li	r12, BOOK3S_INTERRUPT_HMI
	li	r3, 1
	blr

	/* external interrupt - create a stack frame so we can call C */
7:	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -PPC_MIN_STKFRM(r1)
	bl	kvmppc_read_intr
	nop
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	cmpdi	r3, 1
	ble	1f

	/*
	 * Return code of 2 means PCI passthrough interrupt, but
	 * we need to return to the host to complete handling the
	 * interrupt. Trap reason is expected in r12 by guest
	 * exit code.
	 */
	li	r12, BOOK3S_INTERRUPT_HV_RM_HARD
1:
	ld	r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1)
	addi	r1, r1, PPC_MIN_STKFRM
	mtlr	r0
	blr

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_save_fp:
	mflr	r30
	mr	r31,r3
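	/* enable FP (and VMX/VSX where present) in the MSR so the state can be saved */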
	mfmsr	r5
	ori	r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	addi	r3,r3,VCPU_FPRS
	bl	store_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	store_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r31)
	mtlr	r30
	blr

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_load_fp:
	mflr	r30
	mr	r31,r4
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	addi	r3,r4,VCPU_FPRS
	bl	load_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	load_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r31)
	mtspr	SPRN_VRSAVE,r7
	mtlr	r30
	mr	r4,r31
	blr

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save transactional state and TM-related registers.
 * Called with r9 pointing to the vcpu struct.
 * This can modify all checkpointed registers, but
 * restores r1, r2 and r9 (vcpu pointer) before exit.
 */
kvmppc_save_tm:
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -PPC_MIN_STKFRM(r1)

	/* Turn on TM. */
	mfmsr	r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r8

	ld	r5, VCPU_MSR(r9)
	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
	beq	1f	/* TM not active in guest. */

	std	r1, HSTATE_HOST_R1(r13)
	li	r3, TM_CAUSE_KVM_RESCHED

BEGIN_FTR_SECTION
	lbz	r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? */
	cmpwi	r0, 0
	beq	3f
	rldicl. r8, r8, 64 - MSR_TS_S_LG, 62 /* Did we actually hrfid? */
	beq	4f
BEGIN_FTR_SECTION_NESTED(96)
	bl	pnv_power9_force_smt4_catch
END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96)
	nop
	b	6f
3:
	/* Emulation of the treclaim instruction needs TEXASR before treclaim */
	mfspr	r6, SPRN_TEXASR
	std	r6, VCPU_ORIG_TEXASR(r9)
6:
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	/* All GPRs are volatile at this point. */
	TRECLAIM(R3)

	/* Temporarily store r13 and r9 so we have some regs to play with */
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r9, PACATMSCRATCH(r13)

	/* If doing TM emulation on POWER9 DD2.2, check for fake suspend mode */
BEGIN_FTR_SECTION
	lbz	r9, HSTATE_FAKE_SUSPEND(r13)
	cmpwi	r9, 0
	beq	2f
	/*
	 * We were in fake suspend, so we are not going to save the
	 * register state as the guest checkpointed state (since
	 * we already have it), therefore we can now use any volatile GPR.
	 */
	/* Reload stack pointer and TOC. */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)
	/* Set MSR RI now we have r1 and r13 back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1
	HMT_MEDIUM
	ld	r6, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r6
BEGIN_FTR_SECTION_NESTED(96)
	bl	pnv_power9_force_smt4_release
END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96)
	nop

4:
	mfspr	r3, SPRN_PSSCR
	/* PSSCR_FAKE_SUSPEND is a write-only bit, but clear it anyway */
	li	r0, PSSCR_FAKE_SUSPEND
	andc	r3, r3, r0
	mtspr	SPRN_PSSCR, r3
	ld	r9, HSTATE_KVM_VCPU(r13)
	/* Don't save TEXASR, use value from last exit in real suspend state */
	b	11f
2:
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)

	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Get a few more GPRs free. */
	std	r29, VCPU_GPRS_TM(29)(r9)
	std	r30, VCPU_GPRS_TM(30)(r9)
	std	r31, VCPU_GPRS_TM(31)(r9)

	/* Save away PPR and DSCR soon so don't run with user values. */
	mfspr	r31, SPRN_PPR
	HMT_MEDIUM
	mfspr	r30, SPRN_DSCR
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29

	/* Save all but r9, r13 & r29-r31 */
	reg = 0
	.rept	29
	.if (reg != 9) && (reg != 13)
	std	reg, VCPU_GPRS_TM(reg)(r9)
	.endif
	reg = reg + 1
	.endr
	/* ... now save r13 */
	GET_SCRATCH0(r4)
	std	r4, VCPU_GPRS_TM(13)(r9)
	/* ... and save r9 */
	ld	r4, PACATMSCRATCH(r13)
	std	r4, VCPU_GPRS_TM(9)(r9)

	/* Reload stack pointer and TOC. */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	/* Set MSR RI now we have r1 and r13 back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1

	/* Save away checkpointed SPRs. */
	std	r31, VCPU_PPR_TM(r9)
	std	r30, VCPU_DSCR_TM(r9)
	mflr	r5
	mfcr	r6
	mfctr	r7
	mfspr	r8, SPRN_AMR
	mfspr	r10, SPRN_TAR
	mfxer	r11
	std	r5, VCPU_LR_TM(r9)
	stw	r6, VCPU_CR_TM(r9)
	std	r7, VCPU_CTR_TM(r9)
	std	r8, VCPU_AMR_TM(r9)
	std	r10, VCPU_TAR_TM(r9)
	std	r11, VCPU_XER_TM(r9)

	/* Restore r12 as trap number. */
	lwz	r12, VCPU_TRAP(r9)

	/* Save FP/VSX. */
	addi	r3, r9, VCPU_FPRS_TM
	bl	store_fp_state
	addi	r3, r9, VCPU_VRS_TM
	bl	store_vr_state
	mfspr	r6, SPRN_VRSAVE
	stw	r6, VCPU_VRSAVE_TM(r9)
1:
	/*
	 * We need to save these SPRs after the treclaim so that the software
	 * error code is recorded correctly in the TEXASR.  Also the user may
	 * change these outside of a transaction, so they must always be
	 * context switched.
	 */
	mfspr	r7, SPRN_TEXASR
	std	r7, VCPU_TEXASR(r9)
11:
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)

	addi	r1, r1, PPC_MIN_STKFRM
	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr

/*
 * Restore transactional state and TM-related registers.
 * Called with r4 pointing to the vcpu struct.
 * This potentially modifies all checkpointed registers.
 * It restores r1, r2, r4 from the PACA.
 */
kvmppc_restore_tm:
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)

	/* Turn on TM/FP/VSX/VMX so we can restore them. */
	mfmsr	r5
	li	r6, MSR_TM >> 32
	sldi	r6, r6, 32
	or	r5, r5, r6
	ori	r5, r5, MSR_FP
	oris	r5, r5, (MSR_VEC | MSR_VSX)@h
	mtmsrd	r5

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r4)
	ld	r6, VCPU_TFIAR(r4)
	ld	r7, VCPU_TEXASR(r4)
	mtspr	SPRN_TFHAR, r5
	mtspr	SPRN_TFIAR, r6
	mtspr	SPRN_TEXASR, r7

	li	r0, 0
	stb	r0, HSTATE_FAKE_SUSPEND(r13)
	ld	r5, VCPU_MSR(r4)
	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
	beqlr		/* TM not active in guest */
	std	r1, HSTATE_HOST_R1(r13)

	/* Make sure the failure summary is set, otherwise we'll program check
	 * when we trechkpt.  It's possible that this might not have been set
	 * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
	 * host.
	 */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7

	/*
	 * If we are doing TM emulation for the guest on a POWER9 DD2,
	 * then we don't actually do a trechkpt -- we either set up
	 * fake-suspend mode, or emulate a TM rollback.
	 */
BEGIN_FTR_SECTION
	b	.Ldo_tm_fake_load
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)

	/*
	 * We need to load up the checkpointed state for the guest.
	 * We need to do this early as it will blow away any GPRs, VSRs and
	 * some SPRs.
	 */

	mr	r31, r4
	addi	r3, r31, VCPU_FPRS_TM
	bl	load_fp_state
	addi	r3, r31, VCPU_VRS_TM
	bl	load_vr_state
	mr	r4, r31
	lwz	r7, VCPU_VRSAVE_TM(r4)
	mtspr	SPRN_VRSAVE, r7

	ld	r5, VCPU_LR_TM(r4)
	lwz	r6, VCPU_CR_TM(r4)
	ld	r7, VCPU_CTR_TM(r4)
	ld	r8, VCPU_AMR_TM(r4)
	ld	r9, VCPU_TAR_TM(r4)
	ld	r10, VCPU_XER_TM(r4)
	mtlr	r5
	mtcr	r6
	mtctr	r7
	mtspr	SPRN_AMR, r8
	mtspr	SPRN_TAR, r9
	mtxer	r10

	/*
	 * Load up PPR and DSCR values but don't put them in the actual SPRs
	 * till the last moment to avoid running with userspace PPR and DSCR for
	 * too long.
	 */
	ld	r29, VCPU_DSCR_TM(r4)
	ld	r30, VCPU_PPR_TM(r4)

	std	r2, PACATMSCRATCH(r13) /* Save TOC */

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	/* Load GPRs r0-r28 */
	reg = 0
	.rept	29
	ld	reg, VCPU_GPRS_TM(reg)(r31)
	reg = reg + 1
	.endr

	mtspr	SPRN_DSCR, r29
	mtspr	SPRN_PPR, r30

	/* Load final GPRs */
	ld	29, VCPU_GPRS_TM(29)(r31)
	ld	30, VCPU_GPRS_TM(30)(r31)
	ld	31, VCPU_GPRS_TM(31)(r31)

	/* TM checkpointed state is now setup.  All GPRs are now volatile. */
	TRECHKPT

	/* Now let's get back the state we need. */
	HMT_MEDIUM
	GET_PACA(r13)
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATMSCRATCH(r13)

	/* Set the MSR RI since we have our registers back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1
9:
	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr

.Ldo_tm_fake_load:
	cmpwi	r5, 1		/* check for suspended state */
	bgt	10f
	stb	r5, HSTATE_FAKE_SUSPEND(r13)
	b	9b		/* and return */
10:	stdu	r1, -PPC_MIN_STKFRM(r1)
	/* guest is in transactional state, so simulate rollback */
	mr	r3, r4
	bl	kvmhv_emulate_tm_rollback
	nop
	ld      r4, HSTATE_KVM_VCPU(r13) /* our vcpu pointer has been trashed */
	addi	r1, r1, PPC_MIN_STKFRM
	b	9b
#endif

/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * r12 is (CR << 32) | vector
 * r13 points to our PACA
 * r12 is saved in HSTATE_SCRATCH0(r13)
 * ctr is saved in HSTATE_SCRATCH1(r13) if RELOCATABLE
 * r9 is saved in HSTATE_SCRATCH2(r13)
 * r13 is saved in HSPRG1
 * cfar is saved in HSTATE_CFAR(r13)
 * ppr is saved in HSTATE_PPR(r13)
 */
kvmppc_bad_host_intr:
	/*
	 * Switch to the emergency stack, but start half-way down in
	 * case we were already on it.
	 */
	mr	r9, r1
	std	r1, PACAR1(r13)
	ld	r1, PACAEMERGSP(r13)
	subi	r1, r1, THREAD_SIZE/2 + INT_FRAME_SIZE
	std	r9, 0(r1)
	std	r0, GPR0(r1)
	std	r9, GPR1(r1)
	std	r2, GPR2(r1)
	SAVE_4GPRS(3, r1)
	SAVE_2GPRS(7, r1)
	srdi	r0, r12, 32
	clrldi	r12, r12, 32
	std	r0, _CCR(r1)
	std	r12, _TRAP(r1)
	andi.	r0, r12, 2
	beq	1f
	mfspr	r3, SPRN_HSRR0
	mfspr	r4, SPRN_HSRR1
	mfspr	r5, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
	b	2f
1:	mfspr	r3, SPRN_SRR0
	mfspr	r4, SPRN_SRR1
	mfspr	r5, SPRN_DAR
	mfspr	r6, SPRN_DSISR
2:	std	r3, _NIP(r1)
	std	r4, _MSR(r1)
	std	r5, _DAR(r1)
	std	r6, _DSISR(r1)
	ld	r9, HSTATE_SCRATCH2(r13)
	ld	r12, HSTATE_SCRATCH0(r13)
	GET_SCRATCH0(r0)
	SAVE_4GPRS(9, r1)
	std	r0, GPR13(r1)
	SAVE_NVGPRS(r1)
	ld	r5, HSTATE_CFAR(r13)
	std	r5, ORIG_GPR3(r1)
	mflr	r3
#ifdef CONFIG_RELOCATABLE
	ld	r4, HSTATE_SCRATCH1(r13)
#else
	mfctr	r4
#endif
	mfxer	r5
	lbz	r6, PACAIRQSOFTMASK(r13)
	std	r3, _LINK(r1)
	std	r4, _CTR(r1)
	std	r5, _XER(r1)
	std	r6, SOFTE(r1)
	ld	r2, PACATOC(r13)
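	/* leave the ASCII marker "regshere" below the saved registers for debuggers */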
	LOAD_REG_IMMEDIATE(3, 0x7265677368657265)
	std	r3, STACK_FRAME_OVERHEAD-16(r1)

	/*
	 * On POWER9 do a minimal restore of the MMU and call C code,
	 * which will print a message and panic.
	 * XXX On POWER7 and POWER8, we just spin here since we don't
	 * know what the other threads are doing (and we don't want to
	 * coordinate with them) - but at least we now have register state
	 * in memory that we might be able to look at from another CPU.
	 */
BEGIN_FTR_SECTION
	b	.
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_KVM(r9)

	li	r0, 0
	mtspr	SPRN_AMR, r0
	mtspr	SPRN_IAMR, r0
	mtspr	SPRN_CIABR, r0
	mtspr	SPRN_DAWRX, r0

	/* Flush the ERAT on radix P9 DD1 guest exit */
BEGIN_FTR_SECTION
	PPC_INVALIDATE_ERAT
END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)

BEGIN_MMU_FTR_SECTION
	b	4f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)

	slbmte	r0, r0
	slbia
	ptesync
	ld	r8, PACA_SLBSHADOWPTR(r13)
	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7, r5, SLB_ESID_V@h
	beq	3f
	slbmte	r6, r5
3:	addi	r8, r8, 16
	.endr

4:	lwz	r7, KVM_HOST_LPID(r10)
	mtspr	SPRN_LPID, r7
	mtspr	SPRN_PID, r0
	ld	r8, KVM_HOST_LPCR(r10)
	mtspr	SPRN_LPCR, r8
	isync
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	/*
	 * Turn on the MMU and jump to C code
	 */
	bcl	20, 31, .+4
5:	mflr	r3
	addi	r3, r3, 9f - 5b
	ld	r4, PACAKMSR(r13)
	mtspr	SPRN_SRR0, r3
	mtspr	SPRN_SRR1, r4
	RFI_TO_KERNEL
9:	addi	r3, r1, STACK_FRAME_OVERHEAD
	bl	kvmppc_bad_interrupt
	b	9b

/*
 * This mimics the MSR transition on IRQ delivery.  The new guest MSR is taken
 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
 *   r11 has the guest MSR value (in/out)
 *   r9 has a vcpu pointer (in)
 *   r0 is used as a scratch register
 */
kvmppc_msr_interrupt:
	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r0, 2 /* Check if we are in transactional state..  */
	ld	r11, VCPU_INTR_MSR(r9)
	bne	1f
	/* ... if transactional, change to suspended */
	li	r0, 1
1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	blr

/*
 * This works around a hardware bug on POWER8E processors, where
 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
 * performance monitor interrupt.  Instead, when we need to have
 * an interrupt pending, we have to arrange for a counter to overflow.
 */
kvmppc_fix_pmao:
	li	r3, 0
	mtspr	SPRN_MMCR2, r3
	lis	r3, (MMCR0_PMXE | MMCR0_FCECE)@h
	ori	r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
	mtspr	SPRN_MMCR0, r3
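	/* load PMC6 with 0x7fffffff so the next increment overflows and raises a PMI */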
	lis	r3, 0x7fff
	ori	r3, r3, 0xffff
	mtspr	SPRN_PMC6, r3
	isync
	blr

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
/*
 * Start timing an activity
 * r3 = pointer to time accumulation struct, r4 = vcpu
 */
kvmhv_start_timing:
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, VCORE_IN_GUEST(r5)
	cmpwi	r6, 0
	beq	5f				/* if in guest, need to */
	ld	r6, VCORE_TB_OFFSET(r5)		/* subtract timebase offset */
5:	mftb	r5
	subf	r5, r6, r5
	std	r3, VCPU_CUR_ACTIVITY(r4)
	std	r5, VCPU_ACTIVITY_START(r4)
	blr

/*
 * Accumulate time to one activity and start another.
 * r3 = pointer to new time accumulation struct, r4 = vcpu
 */
kvmhv_accumulate_time:
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r8, VCORE_IN_GUEST(r5)
	cmpwi	r8, 0
	beq	4f				/* if in guest, need to */
	ld	r8, VCORE_TB_OFFSET(r5)		/* subtract timebase offset */
4:	ld	r5, VCPU_CUR_ACTIVITY(r4)
	ld	r6, VCPU_ACTIVITY_START(r4)
	std	r3, VCPU_CUR_ACTIVITY(r4)
	mftb	r7
	subf	r7, r8, r7
	std	r7, VCPU_ACTIVITY_START(r4)
	cmpdi	r5, 0
	beqlr
	subf	r3, r6, r7
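	/* fold the elapsed time into the stats under the seqcount (odd value = update in progress) */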
	ld	r8, TAS_SEQCOUNT(r5)
	cmpdi	r8, 0
	addi	r8, r8, 1
	std	r8, TAS_SEQCOUNT(r5)
	lwsync
	ld	r7, TAS_TOTAL(r5)
	add	r7, r7, r3
	std	r7, TAS_TOTAL(r5)
	ld	r6, TAS_MIN(r5)
	ld	r7, TAS_MAX(r5)
	beq	3f
	cmpd	r3, r6
	bge	1f
3:	std	r3, TAS_MIN(r5)
1:	cmpd	r3, r7
	ble	2f
	std	r3, TAS_MAX(r5)
2:	lwsync
	addi	r8, r8, 1
	std	r8, TAS_SEQCOUNT(r5)
	blr
#endif