/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/tm.h>
#include <asm/opal.h>
#include <asm/xive-regs.h>

/* Sign-extend HDEC if not on POWER9 */
#define EXTEND_HDEC(reg)			\
BEGIN_FTR_SECTION;				\
	extsw	reg, reg;			\
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2

/* Stack frame offsets for kvmppc_hv_entry */
#define SFS			160
#define STACK_SLOT_TRAP		(SFS-4)
#define STACK_SLOT_TID		(SFS-16)
#define STACK_SLOT_PSSCR	(SFS-24)
#define STACK_SLOT_PID		(SFS-32)
#define STACK_SLOT_IAMR		(SFS-40)
#define STACK_SLOT_CIABR	(SFS-48)
#define STACK_SLOT_DAWR		(SFS-56)
#define STACK_SLOT_DAWRX	(SFS-64)
#define STACK_SLOT_HFSCR	(SFS-72)

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
	mfmsr	r10
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
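	/*
	 * Switch to real mode: RFI to kvmppc_call_hv_entry with
	 * MSR_IR/MSR_DR cleared.  MSR_RI is cleared first because
	 * SRR0/SRR1 are live across the RFI, so an interrupt taken
	 * here would be unrecoverable.
	 */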
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI

kvmppc_call_hv_entry:
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

BEGIN_FTR_SECTION
	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	cmpwi	r4, 0
	beq	23f			/* skip if not */
BEGIN_FTR_SECTION
	ld	r3, HSTATE_MMCR0(r13)
	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r4, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC1(r13)
	lwz	r4, HSTATE_PMC2(r13)
	lwz	r5, HSTATE_PMC3(r13)
	lwz	r6, HSTATE_PMC4(r13)
	lwz	r8, HSTATE_PMC5(r13)
	lwz	r9, HSTATE_PMC6(r13)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, HSTATE_MMCR0(r13)
	ld	r4, HSTATE_MMCR1(r13)
	ld	r5, HSTATE_MMCRA(r13)
	ld	r6, HSTATE_SIAR(r13)
	ld	r7, HSTATE_SDAR(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_SIAR, r6
	mtspr	SPRN_SDAR, r7
BEGIN_FTR_SECTION
	ld	r8, HSTATE_MMCR2(r13)
	ld	r9, HSTATE_SIER(r13)
	mtspr	SPRN_MMCR2, r8
	mtspr	SPRN_SIER, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync
23:

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/* hwthread_req may have got set by cede or no vcpu, so clear it */
	li	r0, 0
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/*
	 * For external interrupts we need to call the Linux
	 * handler to process the interrupt. We do that by jumping
	 * to absolute address 0x500 for external interrupts.
	 * The [h]rfid at the end of the handler will return to
	 * the book3s_hv_interrupts.S code. For other interrupts
	 * we do the rfid to get back to the book3s_hv_interrupts.S
	 * code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	ld	r7, HSTATE_HOST_MSR(r13)

	/*
	 * If we came back from the guest via a relocation-on interrupt,
	 * we will be in virtual mode at this point, which makes it a
	 * little easier to get back to the caller.
	 */
	mfmsr	r0
	andi.	r0, r0, MSR_IR		/* in real mode? */
	bne	.Lvirt_return

	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	11f
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
	beq 	15f	/* Invoke the H_DOORBELL handler */
	cmpwi	cr2, r12, BOOK3S_INTERRUPT_HMI
	beq	cr2, 14f			/* HMI check */

	/* RFI into the highmem handler, or branch to interrupt handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	/*
	 * BOOK3S_INTERRUPT_MACHINE_CHECK is handled at the
	 * time of guest exit
	 */
	RFI

	/* On POWER7, we have external interrupts set to use HSRR0/1 */
11:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0x500

14:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	b	hmi_exception_after_realmode

15:	mtspr SPRN_HSRR0, r8
	mtspr SPRN_HSRR1, r7
	ba    0xe80

	/* Virtual-mode return - can't get here for HMI or machine check */
.Lvirt_return:
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	16f
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
	beq	17f
	andi.	r0, r7, MSR_EE		/* were interrupts hard-enabled? */
	beq	18f
	mtmsrd	r7, 1			/* if so then re-enable them */
18:	mtlr	r8
	blr

16:	mtspr	SPRN_HSRR0, r8		/* jump to reloc-on external vector */
	mtspr	SPRN_HSRR1, r7
	b	exc_virt_0x4500_hardware_interrupt

17:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	b	exc_virt_0x4e80_h_doorbell

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
	/* HDEC may be larger than DEC for arch >= v3.00, but since the */
	/* HDEC value came from DEC in the first place, it will fit */
	mfspr	r3, SPRN_HDEC
	mtspr	SPRN_DEC, r3
	/*
	 * Make sure the primary has finished the MMU switch.
	 * We should never get here on a secondary thread, but
	 * check it for robustness' sake.
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)
65:	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	65b
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
1:	lwarx	r3, 0, r6
	or	r3, r3, r0
	stwcx.	r3, 0, r6
	bne	1b
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r12, 0
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	cmpwi	r7, 0x100
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)

	li	r3, 0		/* Don't wake on privileged (OS) doorbell */
	b	kvm_do_nap

/*
 * kvm_novcpu_wakeup
 *	Entered from kvm_start_guest if kvm_hstate.napping is set
 *	to NAPPING_NOVCPU
 *		r2 = kernel TOC
 *		r13 = paca
 */
kvm_novcpu_wakeup:
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called
	 * a C routine in kvmppc_check_wake_reason.
	 *	r5 = VCORE
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	kvm_novcpu_exit

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
4:	lwarx	r7, 0, r6
	andc	r7, r7, r0
	stwcx.	r7, 0, r6
	bne	4b

	/* See if the wake reason means we need to exit */
	cmpdi	r3, 0
	bge	kvm_novcpu_exit

	/* See if our timeslice has expired (HDEC is negative) */
	mfspr	r0, SPRN_HDEC
	EXTEND_HDEC(r0)
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
	cmpdi	r0, 0
	blt	kvm_novcpu_exit

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
#endif
	b	kvmppc_got_guest

kvm_novcpu_exit:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	13f
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
13:	mr	r3, r12
	stw	r12, STACK_SLOT_TRAP(r1)
	bl	kvmhv_commence_exit
	nop
	lwz	r12, STACK_SLOT_TRAP(r1)
	b	kvmhv_switch_to_host

/*
 * We come in here when wakened from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
kvm_start_guest:

	/* Set runlatch bit the minute you wake up from nap */
	mfspr	r0, SPRN_CTRLF
	ori 	r0, r0, 1
	mtspr	SPRN_CTRLT, r0

	ld	r2,PACATOC(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	beq	kvm_end_cede
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup

	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	/*
	 * kvmppc_check_wake_reason could invoke a C routine, but we
	 * have no volatile registers to restore when we return.
	 */

	cmpdi	r3, 0
	bge	kvm_no_guest

	/* get vcore pointer, NULL if we have nothing to run */
	ld	r5,HSTATE_KVM_VCORE(r13)
	cmpdi	r5,0
	/* if we have no vcore to run, go back to sleep */
	beq	kvm_no_guest

kvm_secondary_got_guest:

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR_DEFAULT(r13)
	std	r6, HSTATE_DSCR(r13)

	/* On thread 0 of a subcore, set HDEC to max */
	lbz	r4, HSTATE_PTID(r13)
	cmpwi	r4, 0
	bne	63f
	LOAD_REG_ADDR(r6, decrementer_max)
	ld	r6, 0(r6)
	mtspr	SPRN_HDEC, r6
	/* and set per-LPAR registers, if doing dynamic micro-threading */
	ld	r6, HSTATE_SPLIT_MODE(r13)
	cmpdi	r6, 0
	beq	63f
	ld	r0, KVM_SPLIT_RPR(r6)
	mtspr	SPRN_RPR, r0
	ld	r0, KVM_SPLIT_PMMAR(r6)
	mtspr	SPRN_PMMAR, r0
	ld	r0, KVM_SPLIT_LDBAR(r6)
	mtspr	SPRN_LDBAR, r0
	isync
63:
	/* Order load of vcpu after load of vcore */
	lwsync
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu and vcore pointers so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Once we clear HSTATE_KVM_VCORE(r13), the code in
	 * kvmppc_run_core() is going to assume that all our vcpu
	 * state is visible in memory.  This lwsync makes sure
	 * that that is true.
	 */
	lwsync
	std	r0, HSTATE_KVM_VCORE(r13)

	/*
	 * All secondaries exiting guest will fall through this path.
	 * Before proceeding, just check for HMI interrupt and
	 * invoke opal hmi handler. By now we are sure that the
	 * primary thread on this core/subcore has already made partition
	 * switch/TB resync and we are good to call opal hmi handler.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	kvm_no_guest

	li	r3,0			/* NULL argument */
	bl	hmi_exception_realmode
/*
 * At this point we have finished executing in the guest.
 * We need to wait for hwthread_req to become zero, since
 * we may not turn on the MMU while hwthread_req is non-zero.
 * While waiting we also need to check if we get given a vcpu to run.
 */
kvm_no_guest:
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	53f
	HMT_MEDIUM
	li	r0, KVM_HWTHREAD_IN_KERNEL
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	sync
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	54f
/*
 * We jump to pnv_wakeup_loss, which will return to the caller
 * of power7_nap in the powernv cpu offline loop.  The value we
 * put in r3 becomes the return value for power7_nap.
 */
	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	li	r3, 0
	b	pnv_wakeup_loss

53:	HMT_LOW
	ld	r5, HSTATE_KVM_VCORE(r13)
	cmpdi	r5, 0
	bne	60f
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	kvm_no_guest
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	kvm_no_guest
	HMT_MEDIUM
	b	kvm_unsplit_nap
60:	HMT_MEDIUM
	b	kvm_secondary_got_guest

54:	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	b	kvm_no_guest

/*
 * Here the primary thread is trying to return the core to
 * whole-core mode, so we need to nap.
 */
kvm_unsplit_nap:
	/*
	 * When secondaries are napping in kvm_unsplit_nap() with
	 * hwthread_req = 1, an HMI goes ignored even though the subcores
	 * have already exited the guest.  The HMI then keeps waking the
	 * secondaries up from nap in a loop, and they always go back to
	 * nap since no vcore is assigned to them.  This makes it
	 * impossible for the primary thread to get hold of the secondary
	 * threads, resulting in a soft lockup in the KVM path.
	 *
	 * So check whether an HMI is pending and handle it before napping.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	55f
	li	r3, 0			/* NULL argument */
	bl	hmi_exception_realmode
55:
	/*
	 * Ensure that secondary doesn't nap when it has
	 * its vcore pointer set.
	 */
	sync		/* matches smp_mb() before setting split_info.do_nap */
	ld	r0, HSTATE_KVM_VCORE(r13)
	cmpdi	r0, 0
	bne	kvm_no_guest
	/* clear any pending message */
BEGIN_FTR_SECTION
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Set kvm_split_mode.napped[tid] = 1 */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	li	r0, 1
	lhz	r4, PACAPACAINDEX(r13)
	clrldi	r4, r4, 61	/* micro-threading => P8 => 8 threads/core */
	addi	r4, r4, KVM_SPLIT_NAPPED
	stbx	r0, r3, r4
	/* Check the do_nap flag again after setting napped[] */
	sync
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	57f
	li	r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
	mfspr	r5, SPRN_LPCR
	rlwimi	r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
	b	kvm_nap_sequence

57:	li	r0, 0
	stbx	r0, r3, r4
	b	kvm_no_guest

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer (or NULL)
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = TOC
	 * all other volatile GPRS = free
	 * Does not preserve non-volatile GPRs or CR fields
	 */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -SFS(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Store initial timestamp */
	cmpdi	r4, 0
	beq	1f
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
1:
#endif

	/* Use cr7 as an indication of radix mode */
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r9, VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r0, KVM_RADIX(r9)
	cmpwi	cr7, r0, 0

	/* Clear out SLB if hash */
	bne	cr7, 2f
	li	r6,0
	slbmte	r6,r6
	slbia
	ptesync
2:
	/*
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Set bit in entry map iff exit map is zero. */
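	/* (The entry bits live in the low byte of entry_exit_map and the
	 * exit bits in the next byte, so a value >= 0x100 below means
	 * some thread has already started to exit.) */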
	li	r7, 1
	lbz	r6, HSTATE_PTID(r13)
	sld	r7, r7, r6
	addi	r8, r5, VCORE_ENTRY_EXIT
21:	lwarx	r3, 0, r8
	cmpwi	r3, 0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	or	r3, r3, r7
	stwcx.	r3, 0, r8
	bne	21b

	/* Primary thread switches to guest partition. */
	cmpwi	r6,0
	bne	10f
	lwz	r7,KVM_LPID(r9)
BEGIN_FTR_SECTION
	ld	r6,KVM_SDR1(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPID,r7
	isync

	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
BEGIN_FTR_SECTION
	/*
	 * On POWER9, individual threads can come in here, but the
	 * TLB is shared between the 4 threads in a core, hence
	 * invalidating on one thread invalidates for all.
	 * Thus we make all 4 threads use the same bit here.
	 */
	clrrdi	r6,r6,2
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	add	r6,r6,r9
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
	li	r8,1
	sld	r8,r8,r7
	ld	r7,0(r6)
	and.	r7,r7,r8
	beq	22f
	/* Flush the TLB of any entries for this LPID */
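	/* (The loop below issues one tlbiel per congruence class,
	 * KVM_TLB_SETS in all, stepping the set index in r7 by 0x1000;
	 * the second variant is the P9 encoding used for radix guests.) */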
	lwz	r0,KVM_TLB_SETS(r9)
	mtctr	r0
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
	li	r0,0			/* RS for P9 version of tlbiel */
	bne	cr7, 29f
28:	tlbiel	r7			/* On P9, rs=0, RIC=0, PRS=0, R=0 */
	addi	r7,r7,0x1000
	bdnz	28b
	b	30f
29:	PPC_TLBIEL(7,0,2,1,1)		/* for radix, RIC=2, PRS=1, R=1 */
	addi	r7,r7,0x1000
	bdnz	29b
30:	ptesync
23:	ldarx	r7,0,r6			/* clear the bit after TLB flushed */
	andc	r7,r7,r8
	stdcx.	r7,0,r6
	bne	23b

	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	37f
	mftb	r6		/* current host timebase */
	add	r8,r8,r6
	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
	mftb	r7		/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	37f
	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
	cmpdi	r7, 0
	beq	38f
	mtspr	SPRN_PCR, r7
38:

BEGIN_FTR_SECTION
	/* DPDES and VTB are shared between threads */
	ld	r8, VCORE_DPDES(r5)
	ld	r7, VCORE_VTB(r5)
	mtspr	SPRN_DPDES, r8
	mtspr	SPRN_VTB, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Mark the subcore state as inside guest */
	bl	kvmppc_subcore_enter_guest
	nop
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r4, HSTATE_KVM_VCPU(r13)
	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Do we have a guest vcpu to run? */
10:	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest
kvmppc_got_guest:

	/* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:
	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	li	r6, LPPACA_YIELDCOUNT
	LWZX_BE	r5, r3, r6
	addi	r5, r5, 1
	STWX_BE	r5, r3, r6
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8

	/* Save host values of some registers */
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_TIDR
	mfspr	r6, SPRN_PSSCR
	mfspr	r7, SPRN_PID
	mfspr	r8, SPRN_IAMR
	std	r5, STACK_SLOT_TID(r1)
	std	r6, STACK_SLOT_PSSCR(r1)
	std	r7, STACK_SLOT_PID(r1)
	std	r8, STACK_SLOT_IAMR(r1)
	mfspr	r5, SPRN_HFSCR
	std	r5, STACK_SLOT_HFSCR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_CIABR
	mfspr	r6, SPRN_DAWR
	mfspr	r7, SPRN_DAWRX
	std	r5, STACK_SLOT_CIABR(r1)
	std	r6, STACK_SLOT_DAWR(r1)
	std	r7, STACK_SLOT_DAWRX(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

BEGIN_FTR_SECTION
	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
	isync
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	bl	kvmppc_restore_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
BEGIN_FTR_SECTION
	ld	r3, VCPU_MMCR(r4)
	andi.	r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r5, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r7, VCPU_SIAR(r4)
	ld	r8, VCPU_SDAR(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_SIAR, r7
	mtspr	SPRN_SDAR, r8
BEGIN_FTR_SECTION
	ld	r5, VCPU_MMCR + 24(r4)
	ld	r6, VCPU_SIER(r4)
	mtspr	SPRN_MMCR2, r5
	mtspr	SPRN_SIER, r6
BEGIN_FTR_SECTION_NESTED(96)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
	mtspr	SPRN_SPMC1, r7
	mtspr	SPRN_SPMC2, r8
	mtspr	SPRN_MMCRS, r9
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5

BEGIN_FTR_SECTION
	/* Skip next section on POWER7 */
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Load up POWER8-specific registers */
	ld	r5, VCPU_IAMR(r4)
	lwz	r6, VCPU_PSPB(r4)
	ld	r7, VCPU_FSCR(r4)
	mtspr	SPRN_IAMR, r5
	mtspr	SPRN_PSPB, r6
	mtspr	SPRN_FSCR, r7
	ld	r5, VCPU_DAWR(r4)
	ld	r6, VCPU_DAWRX(r4)
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_TAR(r4)
	mtspr	SPRN_DAWR, r5
	mtspr	SPRN_DAWRX, r6
	mtspr	SPRN_CIABR, r7
	mtspr	SPRN_TAR, r8
	ld	r5, VCPU_IC(r4)
	ld	r8, VCPU_EBBHR(r4)
	mtspr	SPRN_IC, r5
	mtspr	SPRN_EBBHR, r8
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
	ld	r8, VCPU_WORT(r4)
	mtspr	SPRN_EBBRR, r5
	mtspr	SPRN_BESCR, r6
	mtspr	SPRN_PID, r7
	mtspr	SPRN_WORT, r8
BEGIN_FTR_SECTION
	PPC_INVALIDATE_ERAT
END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
BEGIN_FTR_SECTION
	/* POWER8-only registers */
	ld	r5, VCPU_TCSCR(r4)
	ld	r6, VCPU_ACOP(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r8, VCPU_TACR(r4)
	mtspr	SPRN_TCSCR, r5
	mtspr	SPRN_ACOP, r6
	mtspr	SPRN_CSIGR, r7
	mtspr	SPRN_TACR, r8
FTR_SECTION_ELSE
	/* POWER9-only registers */
	ld	r5, VCPU_TID(r4)
	ld	r6, VCPU_PSSCR(r4)
	oris	r6, r6, PSSCR_EC@h	/* make the stop instruction trap to HV */
	ld	r7, VCPU_HFSCR(r4)
	mtspr	SPRN_TIDR, r5
	mtspr	SPRN_PSSCR, r6
	mtspr	SPRN_HFSCR, r7
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
8:

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET(r5)
	add	r8,r8,r6
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3
	std	r3,VCPU_DEC(r4)

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Secondary threads wait for primary to have done partition switch */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, HSTATE_PTID(r13)
	cmpwi	r6, 0
	beq	21f
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	bne	21f
	HMT_LOW
20:	lwz	r3, VCORE_ENTRY_EXIT(r5)
	cmpwi	r3, 0x100
	bge	no_switch_exit
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	20b
	HMT_MEDIUM
21:
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync

	/* Check if HDEC expires soon */
	mfspr	r3, SPRN_HDEC
	EXTEND_HDEC(r3)
	cmpdi	r3, 512		/* 1 microsecond */
	blt	hdec_soon

#ifdef CONFIG_KVM_XICS
	/* We are entering the guest on that thread, push VCPU to XIVE */
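	/* (Store the vcpu's saved OS context word and CAM word into the
	 * OS ring of this thread's TIMA, then mark the vcpu as pushed.) */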
	ld	r10, HSTATE_XIVE_TIMA_PHYS(r13)
	cmpldi	cr0, r10, 0
	beq	no_xive
	ld	r11, VCPU_XIVE_SAVED_STATE(r4)
	li	r9, TM_QW1_OS
	stdcix	r11,r9,r10
	eieio
	lwz	r11, VCPU_XIVE_CAM_WORD(r4)
	li	r9, TM_QW1_OS + TM_WORD2
	stwcix	r11,r9,r10
	li	r9, 1
	stw	r9, VCPU_XIVE_PUSHED(r4)
no_xive:
#endif /* CONFIG_KVM_XICS */

deliver_guest_interrupt:
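	/* Restore CTR and XER, set up SRR0/SRR1 and the guest MSR from
	 * the vcpu, and synthesize any pending external or decrementer
	 * interrupt before entering the guest. */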
	ld	r6, VCPU_CTR(r4)
	ld	r7, VCPU_XER(r4)

	mtctr	r6
	mtxer	r7

kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

	/* r11 = vcpu->arch.msr & ~MSR_HV */
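	/* (Rotate the HV bit up to the MSB, clear it with the rldicl
	 * mask, rotate back, and force MSR_ME on.) */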
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME

	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	cmpdi	cr1, r0, 0
	andi.	r8, r11, MSR_EE
	mfspr	r8, SPRN_LPCR
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
	mtspr	SPRN_LPCR, r8
	isync
	beq	5f
	li	r0, BOOK3S_INTERRUPT_EXTERNAL
	bne	cr1, 12f
	mfspr	r0, SPRN_DEC
BEGIN_FTR_SECTION
	/* On POWER9 check whether the guest has large decrementer enabled */
	andis.	r8, r8, LPCR_LD@h
	bne	15f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	extsw	r0, r0
15:	cmpdi	r0, 0
	li	r0, BOOK3S_INTERRUPT_DECREMENTER
	bge	5f

12:	mtspr	SPRN_SRR0, r10
	mr	r10,r0
	mtspr	SPRN_SRR1, r11
	mr	r9, r4
	bl	kvmppc_msr_interrupt
5:
BEGIN_FTR_SECTION
	b	fast_guest_return
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	/* On POWER9, check for pending doorbell requests */
	lbz	r0, VCPU_DBELL_REQ(r4)
	cmpwi	r0, 0
	beq	fast_guest_return
	ld	r5, HSTATE_KVM_VCORE(r13)
	/* Set DPDES register so the CPU will take a doorbell interrupt */
	li	r0, 1
	mtspr	SPRN_DPDES, r0
	std	r0, VCORE_DPDES(r5)
	/* Make sure other cpus see vcore->dpdes set before dbell req clear */
	lwsync
	/* Clear the pending doorbell request */
	li	r0, 0
	stb	r0, VCPU_DBELL_REQ(r4)

/*
 * Required state:
 * R4 = vcpu
 * R10: value for HSRR0
 * R11: value for HSRR1
 * R13 = PACA
 */
fast_guest_return:
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Accumulate timing */
	addi	r3, r4, VCPU_TB_GUEST
	bl	kvmhv_accumulate_time
#endif

	/* Enter guest */

BEGIN_FTR_SECTION
	ld	r5, VCPU_CFAR(r4)
	mtspr	SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r0, VCPU_PPR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)

BEGIN_FTR_SECTION
	mtspr	SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)

	hrfid
	b	.

secondary_too_late:
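	/* A thread has started exiting before we got into the guest;
	 * record a null trap for this vcpu and rejoin the host switch
	 * path. */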
	li	r12, 0
	cmpdi	r4, 0
	beq	11f
	stw	r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
11:	b	kvmhv_switch_to_host

no_switch_exit:
	HMT_MEDIUM
	li	r12, 0
	b	12f
hdec_soon:
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
12:	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
	b	guest_exit_cont

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R12		= (guest CR << 32) | interrupt vector
	 * R13		= PACA
	 * guest R12 saved in shadow VCPU SCRATCH0
	 * guest CTR saved in shadow VCPU SCRATCH1 if RELOCATABLE
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std	r9, HSTATE_SCRATCH2(r13)
	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr
#endif
	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	std	r3, VCPU_GPR(R12)(r9)
	/* CR is in the high half of r12 */
	srdi	r4, r12, 32
	stw	r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	/* trap is in the low half of r12, clear CR from the high half */
	clrldi	r12, r12, 32
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(R13)(r9)
	std	r4, VCPU_LR(r9)

	stw	r12,VCPU_TRAP(r9)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMINTR
	mr	r4, r9
	bl	kvmhv_accumulate_time
	ld	r5, VCPU_GPR(R5)(r9)
	ld	r6, VCPU_GPR(R6)(r9)
	ld	r7, VCPU_GPR(R7)(r9)
	ld	r8, VCPU_GPR(R8)(r9)
#endif

	/* Save HEIR (HV emulation assist reg) in emul_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
	stw	r3,VCPU_LAST_INST(r9)
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
11:	stw	r3,VCPU_HEIR(r9)

	/* these are volatile across C function calls */
#ifdef CONFIG_RELOCATABLE
	ld	r3, HSTATE_SCRATCH1(r13)
	mtctr	r3
#else
	mfctr	r3
#endif
	mfxer	r4
	std	r3, VCPU_CTR(r9)
	std	r4, VCPU_XER(r9)

	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	kvmppc_hdsi
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq	kvmppc_hisi

	/* See if this is a leftover HDEC interrupt */
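	/* (If HDEC is no longer negative the interrupt is stale and we
	 * can just go back into the guest.) */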
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,0
	mr	r4,r9
	bge	fast_guest_return
2:
	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Hypervisor doorbell - exit only if host IPI flag set */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
	bne	3f
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	beq	4f
	b	guest_exit_cont
3:
	/* If it's a hypervisor facility unavailable interrupt, save HFSCR */
	cmpwi	r12, BOOK3S_INTERRUPT_H_FAC_UNAVAIL
	bne	14f
	mfspr	r3, SPRN_HFSCR
	std	r3, VCPU_HFSCR(r9)
	b	guest_exit_cont
14:
	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+	guest_exit_cont

	/* External interrupt, first check for host_ipi. If this is
	 * set, we know the host wants us out so let's do it now
	 */
	bl	kvmppc_read_intr

	/*
	 * Restore the active volatile registers after returning from
	 * a C function.
	 */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_EXTERNAL

	/*
	 * kvmppc_read_intr return codes:
	 *
	 * Exit to host (r3 > 0)
	 *   1 An interrupt is pending that needs to be handled by the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *
	 *   2 Passthrough that needs completion in the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *     However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
	 *     to indicate to the host to complete handling the interrupt
	 *
	 * Before returning to guest, we check if any CPU is heading out
	 * to the host and if so, we head out also.  Otherwise we handle
	 * the return values <= 0 described below.
	 *
	 * Return to guest (r3 <= 0)
	 *  0 No external interrupt is pending
	 * -1 A guest wakeup IPI (which has now been cleared)
	 *    In either case, we return to guest to deliver any pending
	 *    guest interrupts.
	 *
	 * -2 A PCI passthrough external interrupt was handled
	 *    (interrupt was delivered directly to guest)
	 *    Return to guest to deliver any pending guest interrupts.
	 */

	cmpdi	r3, 1
	ble	1f

	/* Return code = 2 */
	li	r12, BOOK3S_INTERRUPT_HV_RM_HARD
	stw	r12, VCPU_TRAP(r9)
	b	guest_exit_cont

1:	/* Return code <= 1 */
	cmpdi	r3, 0
	bgt	guest_exit_cont

	/* Return code <= 0 */
4:	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	mr	r4, r9
	blt	deliver_guest_interrupt

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
#ifdef CONFIG_KVM_XICS
	/* We are exiting, pull the VP from the XIVE */
	lwz	r0, VCPU_XIVE_PUSHED(r9)
	cmpwi	cr0, r0, 0
	beq	1f
	li	r7, TM_SPC_PULL_OS_CTX
	li	r6, TM_QW1_OS
	mfmsr	r0
	andi.	r0, r0, MSR_IR		/* in real mode? */
	beq	2f
	ld	r10, HSTATE_XIVE_TIMA_VIRT(r13)
	cmpldi	cr0, r10, 0
	beq	1f
	/* First load to pull the context, we ignore the value */
	lwzx	r11, r7, r10
	eieio
	/* Second load to recover the context state (Words 0 and 1) */
	ldx	r11, r6, r10
	b	3f
2:	ld	r10, HSTATE_XIVE_TIMA_PHYS(r13)
	cmpldi	cr0, r10, 0
	beq	1f
	/* First load to pull the context, we ignore the value */
	lwzcix	r11, r7, r10
	eieio
	/* Second load to recover the context state (Words 0 and 1) */
	ldcix	r11, r6, r10
3:	std	r11, VCPU_XIVE_SAVED_STATE(r9)
	/* Fixup some of the state for the next load */
	li	r10, 0
	li	r0, 0xff
	stw	r10, VCPU_XIVE_PUSHED(r9)
	stb	r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
	stb	r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
1:
#endif /* CONFIG_KVM_XICS */
	/* Save more register state  */
	mfdar	r6
	mfdsisr	r7
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	mc_cont
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
mc_cont:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMEXIT
	mr	r4, r9
	bl	kvmhv_accumulate_time
#endif

	mr 	r3, r12
	/* Increment exit count, poke other threads to exit */
	bl	kvmhv_commence_exit
	nop
	ld	r9, HSTATE_KVM_VCPU(r13)
	lwz	r12, VCPU_TRAP(r9)

	/* Stop others sending VCPU interrupts to this physical CPU */
	li	r0, -1
	stw	r0, VCPU_CPU(r9)
	stw	r0, VCPU_THREAD_CPU(r9)

	/* Save guest CTRL register, set runlatch to 1 */
	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Read the guest SLB and save it away */
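	/* (For a radix guest there is nothing to save, so slb_max is set
	 * to 0 below; for hash, slbmfee/slbmfev read the ESID/VSID
	 * halves of each entry and only valid entries are kept.) */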
	ld	r5, VCPU_KVM(r9)
	lbz	r0, KVM_RADIX(r5)
	cmpwi	r0, 0
	li	r5, 0
	bne	3f			/* for radix, save 0 entries */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
3:	stw	r5,VCPU_SLB_MAX(r9)

	/*
	 * Save the guest PURR/SPURR
	 */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4

	/* Save DEC */
	ld	r3, HSTATE_KVM_VCORE(r13)
	mfspr	r5,SPRN_DEC
	mftb	r6
	/* On P9, if the guest has large decr enabled, don't sign extend */
BEGIN_FTR_SECTION
	ld	r4, VCORE_LPCR(r3)
	andis.	r4, r4, LPCR_LD@h
	bne	16f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	extsw	r5,r5
16:	add	r5,r5,r6
	/* r5 is a guest timebase value here, convert to host TB */
	ld	r4,VCORE_TB_OFFSET(r3)
	subf	r5,r4,r5
	std	r5,VCPU_DEC_EXPIRES(r9)

BEGIN_FTR_SECTION
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Save POWER8-specific registers */
	mfspr	r5, SPRN_IAMR
	mfspr	r6, SPRN_PSPB
	mfspr	r7, SPRN_FSCR
	std	r5, VCPU_IAMR(r9)
	stw	r6, VCPU_PSPB(r9)
	std	r7, VCPU_FSCR(r9)
	mfspr	r5, SPRN_IC
	mfspr	r7, SPRN_TAR
	std	r5, VCPU_IC(r9)
	std	r7, VCPU_TAR(r9)
	mfspr	r8, SPRN_EBBHR
	std	r8, VCPU_EBBHR(r9)
	mfspr	r5, SPRN_EBBRR
	mfspr	r6, SPRN_BESCR
	mfspr	r7, SPRN_PID
	mfspr	r8, SPRN_WORT
	std	r5, VCPU_EBBRR(r9)
	std	r6, VCPU_BESCR(r9)
	stw	r7, VCPU_GUEST_PID(r9)
	std	r8, VCPU_WORT(r9)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_TCSCR
	mfspr	r6, SPRN_ACOP
	mfspr	r7, SPRN_CSIGR
	mfspr	r8, SPRN_TACR
	std	r5, VCPU_TCSCR(r9)
	std	r6, VCPU_ACOP(r9)
	std	r7, VCPU_CSIGR(r9)
	std	r8, VCPU_TACR(r9)
FTR_SECTION_ELSE
	mfspr	r5, SPRN_TIDR
	mfspr	r6, SPRN_PSSCR
	std	r5, VCPU_TID(r9)
	rldicl	r6, r6, 4, 50		/* r6 &= PSSCR_GUEST_VIS */
	rotldi	r6, r6, 60
	std	r6, VCPU_PSSCR(r9)
	/* Restore host HFSCR value */
	ld	r7, STACK_SLOT_HFSCR(r1)
	mtspr	SPRN_HFSCR, r7
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
	/*
	 * Restore various registers to 0, where non-zero values
	 * set by the guest could disrupt the host.
	 */
	li	r0, 0
	mtspr	SPRN_PSPB, r0
	mtspr	SPRN_WORT, r0
BEGIN_FTR_SECTION
	mtspr	SPRN_IAMR, r0
	mtspr	SPRN_TCSCR, r0
	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
	li	r0, 1
	sldi	r0, r0, 31
	mtspr	SPRN_MMCRS, r0
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
8:

	/* Save and reset AMR and UAMOR before turning on the MMU */
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	li	r6,0
	mtspr	SPRN_AMR,r6
	mtspr	SPRN_UAMOR, r6

	/* Switch DSCR back to host value */
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
	mtspr	SPRN_DSCR, r7

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* save FP state */
	mr	r3, r9
	bl	kvmppc_save_fp

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	bl	kvmppc_save_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
	beq	25f
	li	r4, LPPACA_YIELDCOUNT
	LWZX_BE	r3, r8, r4
	addi	r3, r3, 1
	STWX_BE	r3, r8, r4
	li	r3, 1
	stb	r3, VCPU_VPA_DIRTY(r9)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
BEGIN_FTR_SECTION
	/*
	 * POWER8 seems to have a hardware bug where setting
	 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
	 * when some counters are already negative doesn't seem
	 * to cause a performance monitor alert (and hence interrupt).
	 * The effect of this is that when saving the PMU state,
	 * if there is no PMU alert pending when we read MMCR0
	 * before freezing the counters, but one becomes pending
	 * before we read the counters, we lose it.
	 * To work around this, we need a way to freeze the counters
	 * before reading MMCR0.  Normally, freezing the counters
	 * is done by writing MMCR0 (to set MMCR0[FC]) which
	 * unavoidably writes MMCR0[PMAO] as well.  On POWER8,
	 * we can also freeze the counters using MMCR2, by writing
	 * 1s to all the counter freeze condition bits (there are
	 * 9 bits each for 6 counters).
	 */
	li	r3, -1			/* set all freeze bits */
	clrrdi	r3, r3, 10
	mfspr	r10, SPRN_MMCR2
	mtspr	SPRN_MMCR2, r3
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
	/* Clear MMCRA in order to disable SDAR updates */
	li	r7, 0
	mtspr	SPRN_MMCRA, r7
	isync
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	mfspr	r7, SPRN_SIAR
	mfspr	r8, SPRN_SDAR
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
BEGIN_FTR_SECTION
	std	r10, VCPU_MMCR + 24(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_SIER
	std	r5, VCPU_SIER(r9)
BEGIN_FTR_SECTION_NESTED(96)
	mfspr	r6, SPRN_SPMC1
	mfspr	r7, SPRN_SPMC2
	mfspr	r8, SPRN_MMCRS
	stw	r6, VCPU_PMC + 24(r9)
	stw	r7, VCPU_PMC + 28(r9)
	std	r8, VCPU_MMCR + 32(r9)
	lis	r4, 0x8000
	mtspr	SPRN_MMCRS, r4
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
22:
	/* Clear out SLB */
	li	r5,0
	slbmte	r5,r5
	slbia
	ptesync

	/* Restore host values of some registers */
BEGIN_FTR_SECTION
	ld	r5, STACK_SLOT_CIABR(r1)
	ld	r6, STACK_SLOT_DAWR(r1)
	ld	r7, STACK_SLOT_DAWRX(r1)
	mtspr	SPRN_CIABR, r5
	mtspr	SPRN_DAWR, r6
	mtspr	SPRN_DAWRX, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
BEGIN_FTR_SECTION
	ld	r5, STACK_SLOT_TID(r1)
	ld	r6, STACK_SLOT_PSSCR(r1)
	ld	r7, STACK_SLOT_PID(r1)
	ld	r8, STACK_SLOT_IAMR(r1)
	mtspr	SPRN_TIDR, r5
	mtspr	SPRN_PSSCR, r6
	mtspr	SPRN_PID, r7
	mtspr	SPRN_IAMR, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
BEGIN_FTR_SECTION
	PPC_INVALIDATE_ERAT
END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)

	/*
	 * POWER7/POWER8 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
kvmhv_switch_to_host:
	/* Secondary threads wait for primary to do partition switch */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r3,HSTATE_PTID(r13)
	cmpwi	r3,0
	beq	15f
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	b	16f

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	rlwinm	r0,r3,32-8,0xff
	clrldi	r3,r3,56
	cmpw	r3,r0
	bne	15b
	isync

	/* Did we actually switch to the guest at all? */
	lbz	r6, VCORE_IN_GUEST(r5)
	cmpwi	r6, 0
	beq	19f

	/* Primary thread switches back to host partition */
	lwz	r7,KVM_HOST_LPID(r4)
BEGIN_FTR_SECTION
	ld	r6,KVM_HOST_SDR1(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to host page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPID,r7
	isync

BEGIN_FTR_SECTION
	/* DPDES and VTB are shared between threads */
	mfspr	r7, SPRN_DPDES
	mfspr	r8, SPRN_VTB
	std	r7, VCORE_DPDES(r5)
	std	r8, VCORE_VTB(r5)
	/* clear DPDES so we don't get guest doorbells in the host */
	li	r8, 0
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* If HMI, call kvmppc_realmode_hmi_handler() */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	27f
	bl	kvmppc_realmode_hmi_handler
	nop
	li	r12, BOOK3S_INTERRUPT_HMI
	/*
	 * At this point kvmppc_realmode_hmi_handler would have resync-ed
	 * the TB. Hence it is not required to subtract guest timebase
	 * offset from timebase. So, skip it.
	 *
	 * Also, do not call kvmppc_subcore_exit_guest() because it has
	 * been invoked as part of kvmppc_realmode_hmi_handler().
	 */
	b	30f

27:
	/* Subtract timebase offset from timebase */
	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	17f
	mftb	r6			/* current guest timebase */
	subf	r8,r8,r6
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	17f
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

17:	bl	kvmppc_subcore_exit_guest
	nop
30:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */

	/* Reset PCR */
	ld	r0, VCORE_PCR(r5)
	cmpdi	r0, 0
	beq	18f
	li	r0, 0
	mtspr	SPRN_PCR, r0
18:
	/* Signal secondary CPUs to continue */
	stb	r0,VCORE_IN_GUEST(r5)
19:	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

16:	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync

	/* load host SLB entries */
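	/* (A radix host skips this; the SLB shadow save area is kept in
	 * big-endian format, hence the LDX_BE loads.) */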
BEGIN_MMU_FTR_SECTION
	b	0f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr
0:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Finish timing, if we have a vcpu */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	li	r3, 0
	beq	2f
	bl	kvmhv_accumulate_time
2:
#endif
	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	ld	r0, SFS+PPC_LR_STKOFF(r1)
	addi	r1, r1, SFS
	mtlr	r0
	blr

/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
kvmppc_hdsi:
	ld	r3, VCPU_KVM(r9)
	lbz	r0, KVM_RADIX(r3)
	cmpwi	r0, 0
	mfspr	r4, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
	bne	.Lradix_hdsi		/* on radix, just save DAR/DSISR/ASDR */
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	beq	3f
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_ASDR		/* on POWER9, use ASDR to get VSID */
	b	4f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	clrrdi	r0, r4, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_DATA_SEGMENT
	bne	7f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	6f
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont
	cmpdi	r3, -2			/* MMIO emulation; need instr word */
	beq	2f

	/* Synthesize a DSI (or DSegI) for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
	mr	r6, r3
1:	li	r0, BOOK3S_INTERRUPT_DATA_STORAGE
	mtspr	SPRN_DSISR, r6
7:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	mr	r10, r0
	bl	kvmppc_msr_interrupt
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
	ld	r8, VCPU_XER(r9)
	mtctr	r7
	mtxer	r8
	mr	r4, r9
	b	fast_guest_return

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)
	b	4b

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	mfmsr	r3
	ori	r4, r3, MSR_DR		/* Enable paging for data */
	mtmsrd	r4
	lwz	r8, 0(r10)
	mtmsrd	r3

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)
	b	guest_exit_cont

.Lradix_hdsi:
	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)
.Lradix_hisi:
	mfspr	r5, SPRN_ASDR
	std	r5, VCPU_FAULT_GPA(r9)
	b	guest_exit_cont

/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
kvmppc_hisi:
	ld	r3, VCPU_KVM(r9)
	lbz	r0, KVM_RADIX(r3)
	cmpwi	r0, 0
	bne	.Lradix_hisi		/* for radix, just save ASDR */
	andis.	r0, r11, SRR1_ISI_NOPT@h
	beq	1f
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	beq	3f
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_ASDR		/* on POWER9, use ASDR to get VSID */
	b	4f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	clrrdi	r0, r10, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_INST_SEGMENT
	bne	7f			/* if no SLB entry found */
4:
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	mr	r4, r10
	mr	r6, r11
	li	r7, 0			/* instruction fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont

	/* Synthesize an ISI (or ISegI) for the guest */
	mr	r11, r3
1:	li	r0, BOOK3S_INTERRUPT_INST_STORAGE
7:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	mr	r10, r0
	bl	kvmppc_msr_interrupt
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
	b	4b

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 *
 * r5 - r8 contain hcall args,
 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
 */
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	andi.	r0,r11,MSR_PR
	/* sc 1 from userspace - reflect to guest syscall */
	bne	sc_1_fast_return
	clrrdi	r3,r3,2
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	bge	guest_exit_cont
	/* See if this hcall is enabled for in-kernel handling */
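	/*
	 * Equivalent to (illustrative; req is the hcall number, with
	 * its low two bits already cleared):
	 *
	 *	if (!test_bit(req / 4, kvm->arch.enabled_hcalls))
	 *		goto guest_exit_cont;
	 */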
	ld	r4, VCPU_KVM(r9)
	srdi	r0, r3, 8	/* r0 = (r3 / 4) >> 6 */
	sldi	r0, r0, 3	/* index into kvm->arch.enabled_hcalls[] */
	add	r4, r4, r0
	ld	r0, KVM_ENABLED_HCALLS(r4)
	rlwinm	r4, r3, 32-2, 0x3f	/* r4 = (r3 / 4) & 0x3f */
	srd	r0, r0, r4
	andi.	r0, r0, 1
	beq	guest_exit_cont
	/* Get pointer to handler, if any, and call it */
	LOAD_REG_ADDR(r4, hcall_real_table)
	lwax	r3,r3,r4
	cmpwi	r3,0
	beq	guest_exit_cont
	add	r12,r3,r4
	mtctr	r12
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	bctrl
	cmpdi	r3,H_TOO_HARD
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)
	ld	r10,VCPU_PC(r4)
	ld	r11,VCPU_MSR(r4)
	b	fast_guest_return

sc_1_fast_return:
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	li	r10, BOOK3S_INTERRUPT_SYSCALL
	bl	kvmppc_msr_interrupt
	mr	r4,r9
	b	fast_guest_return

	/* We've attempted a real mode hcall, but the handler has punted
	 * it back to userspace.  We need to restore some clobbered
	 * volatiles before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)

	b	guest_exit_cont

	.globl	hcall_real_table
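/*
 * The table is indexed by hcall number / 4.  Each entry is a 32-bit
 * offset from hcall_real_table to the real-mode handler for that
 * hcall; an entry of 0 means there is no real-mode handler and the
 * hcall is passed up to the host kernel or userspace.
 */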
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	DOTSYM(kvmppc_h_remove) - hcall_real_table
	.long	DOTSYM(kvmppc_h_enter) - hcall_real_table
	.long	DOTSYM(kvmppc_h_read) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
	.long	DOTSYM(kvmppc_h_protect) - hcall_real_table
	.long	DOTSYM(kvmppc_h_get_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
	.long	0		/* 0x2c */
	.long	0		/* 0x30 */
	.long	0		/* 0x34 */
	.long	0		/* 0x38 */
	.long	0		/* 0x3c */
	.long	0		/* 0x40 */
	.long	0		/* 0x44 */
	.long	0		/* 0x48 */
	.long	0		/* 0x4c */
	.long	0		/* 0x50 */
	.long	0		/* 0x54 */
	.long	0		/* 0x58 */
	.long	0		/* 0x5c */
	.long	0		/* 0x60 */
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
#else
	.long	0		/* 0x64 - H_EOI */
	.long	0		/* 0x68 - H_CPPR */
	.long	0		/* 0x6c - H_IPI */
	.long	0		/* 0x70 - H_IPOLL */
	.long	0		/* 0x74 - H_XIRR */
#endif
	.long	0		/* 0x78 */
	.long	0		/* 0x7c */
	.long	0		/* 0x80 */
	.long	0		/* 0x84 */
	.long	0		/* 0x88 */
	.long	0		/* 0x8c */
	.long	0		/* 0x90 */
	.long	0		/* 0x94 */
	.long	0		/* 0x98 */
	.long	0		/* 0x9c */
	.long	0		/* 0xa0 */
	.long	0		/* 0xa4 */
	.long	0		/* 0xa8 */
	.long	0		/* 0xac */
	.long	0		/* 0xb0 */
	.long	0		/* 0xb4 */
	.long	0		/* 0xb8 */
	.long	0		/* 0xbc */
	.long	0		/* 0xc0 */
	.long	0		/* 0xc4 */
	.long	0		/* 0xc8 */
	.long	0		/* 0xcc */
	.long	0		/* 0xd0 */
	.long	0		/* 0xd4 */
	.long	0		/* 0xd8 */
	.long	0		/* 0xdc */
	.long	DOTSYM(kvmppc_h_cede) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
	.long	0		/* 0xe8 */
	.long	0		/* 0xec */
	.long	0		/* 0xf0 */
	.long	0		/* 0xf4 */
	.long	0		/* 0xf8 */
	.long	0		/* 0xfc */
	.long	0		/* 0x100 */
	.long	0		/* 0x104 */
	.long	0		/* 0x108 */
	.long	0		/* 0x10c */
	.long	0		/* 0x110 */
	.long	0		/* 0x114 */
	.long	0		/* 0x118 */
	.long	0		/* 0x11c */
	.long	0		/* 0x120 */
	.long	DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
	.long	0		/* 0x128 */
	.long	0		/* 0x12c */
	.long	0		/* 0x130 */
	.long	DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
	.long	0		/* 0x140 */
	.long	0		/* 0x144 */
	.long	0		/* 0x148 */
	.long	0		/* 0x14c */
	.long	0		/* 0x150 */
	.long	0		/* 0x154 */
	.long	0		/* 0x158 */
	.long	0		/* 0x15c */
	.long	0		/* 0x160 */
	.long	0		/* 0x164 */
	.long	0		/* 0x168 */
	.long	0		/* 0x16c */
	.long	0		/* 0x170 */
	.long	0		/* 0x174 */
	.long	0		/* 0x178 */
	.long	0		/* 0x17c */
	.long	0		/* 0x180 */
	.long	0		/* 0x184 */
	.long	0		/* 0x188 */
	.long	0		/* 0x18c */
	.long	0		/* 0x190 */
	.long	0		/* 0x194 */
	.long	0		/* 0x198 */
	.long	0		/* 0x19c */
	.long	0		/* 0x1a0 */
	.long	0		/* 0x1a4 */
	.long	0		/* 0x1a8 */
	.long	0		/* 0x1ac */
	.long	0		/* 0x1b0 */
	.long	0		/* 0x1b4 */
	.long	0		/* 0x1b8 */
	.long	0		/* 0x1bc */
	.long	0		/* 0x1c0 */
	.long	0		/* 0x1c4 */
	.long	0		/* 0x1c8 */
	.long	0		/* 0x1cc */
	.long	0		/* 0x1d0 */
	.long	0		/* 0x1d4 */
	.long	0		/* 0x1d8 */
	.long	0		/* 0x1dc */
	.long	0		/* 0x1e0 */
	.long	0		/* 0x1e4 */
	.long	0		/* 0x1e8 */
	.long	0		/* 0x1ec */
	.long	0		/* 0x1f0 */
	.long	0		/* 0x1f4 */
	.long	0		/* 0x1f8 */
	.long	0		/* 0x1fc */
	.long	0		/* 0x200 */
	.long	0		/* 0x204 */
	.long	0		/* 0x208 */
	.long	0		/* 0x20c */
	.long	0		/* 0x210 */
	.long	0		/* 0x214 */
	.long	0		/* 0x218 */
	.long	0		/* 0x21c */
	.long	0		/* 0x220 */
	.long	0		/* 0x224 */
	.long	0		/* 0x228 */
	.long	0		/* 0x22c */
	.long	0		/* 0x230 */
	.long	0		/* 0x234 */
	.long	0		/* 0x238 */
	.long	0		/* 0x23c */
	.long	0		/* 0x240 */
	.long	0		/* 0x244 */
	.long	0		/* 0x248 */
	.long	0		/* 0x24c */
	.long	0		/* 0x250 */
	.long	0		/* 0x254 */
	.long	0		/* 0x258 */
	.long	0		/* 0x25c */
	.long	0		/* 0x260 */
	.long	0		/* 0x264 */
	.long	0		/* 0x268 */
	.long	0		/* 0x26c */
	.long	0		/* 0x270 */
	.long	0		/* 0x274 */
	.long	0		/* 0x278 */
	.long	0		/* 0x27c */
	.long	0		/* 0x280 */
	.long	0		/* 0x284 */
	.long	0		/* 0x288 */
	.long	0		/* 0x28c */
	.long	0		/* 0x290 */
	.long	0		/* 0x294 */
	.long	0		/* 0x298 */
	.long	0		/* 0x29c */
	.long	0		/* 0x2a0 */
	.long	0		/* 0x2a4 */
	.long	0		/* 0x2a8 */
	.long	0		/* 0x2ac */
	.long	0		/* 0x2b0 */
	.long	0		/* 0x2b4 */
	.long	0		/* 0x2b8 */
	.long	0		/* 0x2bc */
	.long	0		/* 0x2c0 */
	.long	0		/* 0x2c4 */
	.long	0		/* 0x2c8 */
	.long	0		/* 0x2cc */
	.long	0		/* 0x2d0 */
	.long	0		/* 0x2d4 */
	.long	0		/* 0x2d8 */
	.long	0		/* 0x2dc */
	.long	0		/* 0x2e0 */
	.long	0		/* 0x2e4 */
	.long	0		/* 0x2e8 */
	.long	0		/* 0x2ec */
	.long	0		/* 0x2f0 */
	.long	0		/* 0x2f4 */
	.long	0		/* 0x2f8 */
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table
#else
	.long	0		/* 0x2fc - H_XIRR_X*/
#endif
	.long	DOTSYM(kvmppc_h_random) - hcall_real_table
	.globl	hcall_real_table_end
hcall_real_table_end:

_GLOBAL(kvmppc_h_set_xdabr)
	andi.	r0, r5, DABRX_USER | DABRX_KERNEL
	beq	6f
	li	r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
	andc.	r0, r5, r0
	beq	3f
6:	li	r3, H_PARAMETER
	blr

_GLOBAL(kvmppc_h_set_dabr)
	li	r5, DABRX_USER | DABRX_KERNEL
3:
BEGIN_FTR_SECTION
	b	2f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r4,VCPU_DABR(r3)
	stw	r5, VCPU_DABRX(r3)
	mtspr	SPRN_DABRX, r5
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4
	mfspr	r5, SPRN_DABR
	cmpd	r4, r5
	bne	1b
	isync
	li	r3,0
	blr

	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
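	/* The two rlwimi instructions below move DABR's read/write and
	 * translation enable bits into the corresponding DAWRX DR/DW
	 * and WT fields; clrrdi then strips those low bits, leaving the
	 * doubleword-aligned address that DAWR expects. */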
2:	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
	rlwimi	r5, r4, 2, DAWRX_WT
	clrrdi	r4, r4, 3
	std	r4, VCPU_DAWR(r3)
	std	r5, VCPU_DAWRX(r3)
	mtspr	SPRN_DAWR, r4
	mtspr	SPRN_DAWRX, r5
	li	r3, 0
	blr

_GLOBAL(kvmppc_h_cede)		/* r3 = vcpu pointer, r11 = msr, r13 = paca */
	ori	r11,r11,MSR_EE
	std	r11,VCPU_MSR(r3)
	li	r0,1
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	cmpwi	r5,0
	bne	kvm_cede_prodded
	li	r12,0		/* set trap to 0 to say hcall is handled */
	stw	r12,VCPU_TRAP(r3)
	li	r0,H_SUCCESS
	std	r0,VCPU_GPR(R3)(r3)

	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r6,HSTATE_PTID(r13)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	clrldi	r8,r8,56
	li	r0,1
	sld	r0,r0,r6
	addi	r6,r5,VCORE_NAPPING_THREADS
31:	lwarx	r4,0,r6
	or	r4,r4,r0
	cmpw	r4,r8
	beq	kvm_cede_exit
	stwcx.	r4,0,r6
	bne	31b
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r0,NAPPING_CEDE
	stb	r0,HSTATE_NAPPING(r13)
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	cmpwi	r7,0x100
	bge	33f		/* another thread already exiting */

/*
 * Although not specifically required by the architecture, POWER7
 * preserves the following registers in nap mode, even if an SMT mode
 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
 */
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)

	/* save FP state */
	bl	kvmppc_save_fp

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	ld	r9, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_save_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif

	/*
	 * Set DEC to the smaller of DEC and HDEC, so that we wake
	 * no later than the end of our timeslice (HDEC interrupts
	 * don't wake us from nap).
	 */
	mfspr	r3, SPRN_DEC
	mfspr	r4, SPRN_HDEC
	mftb	r5
BEGIN_FTR_SECTION
	/* On P9 check whether the guest has large decrementer mode enabled */
	ld	r6, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_LPCR(r6)
	andis.	r6, r6, LPCR_LD@h
	bne	68f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	extsw	r3, r3
68:	EXTEND_HDEC(r4)
	cmpd	r3, r4
	ble	67f
	mtspr	SPRN_DEC, r4
67:
	/* save expiry time of guest decrementer */
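	/*
	 * i.e. (illustrative):
	 *	vcpu->arch.dec_expires = dec + tb - vc->tb_offset;
	 * where dec is the guest DEC value read above and tb the guest
	 * timebase; subtracting the vcore's timebase offset converts
	 * the expiry time into host timebase units.
	 */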
	add	r3, r3, r5
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET(r5)
	subf	r3, r6, r3	/* convert to host TB value */
	std	r3, VCPU_DEC_EXPIRES(r4)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	addi	r3, r4, VCPU_TB_CEDE
	bl	kvmhv_accumulate_time
#endif

	lis	r3, LPCR_PECEDP@h	/* Do wake on privileged doorbell */

	/*
	 * Take a nap until a decrementer or external or doorbell
	 * interrupt occurs, with PECE1 and PECE0 set in LPCR.
	 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
	 * Also clear the runlatch bit before napping.
	 */
kvm_do_nap:
	mfspr	r0, SPRN_CTRLF
	clrrdi	r0, r0, 1
	mtspr	SPRN_CTRLT, r0

	li	r0,1
	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	mfspr	r5,SPRN_LPCR
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
BEGIN_FTR_SECTION
	ori	r5, r5, LPCR_PECEDH
	rlwimi	r5, r3, 0, LPCR_PECEDP
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

kvm_nap_sequence:		/* desired LPCR value in r5 */
BEGIN_FTR_SECTION
	/*
	 * PSSCR bits:	exit criterion = 1 (wakeup based on LPCR at sreset)
	 *		enable state loss = 1 (allow SMT mode switch)
	 *		requested level = 0 (just stop dispatching)
	 */
	lis	r3, (PSSCR_EC | PSSCR_ESL)@h
	mtspr	SPRN_PSSCR, r3
	/* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
	li	r4, LPCR_PECE_HVEE@higher
	sldi	r4, r4, 32
	or	r5, r5, r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPCR,r5
	isync
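	/*
	 * The store/ptesync/load/compare-branch sequence below is
	 * believed to force all of our earlier stores (such as
	 * HSTATE_HWTHREAD_REQ above) out to memory before the thread
	 * naps: the ptesync completes the store, and the dependent
	 * load and branch order it before the nap/stop instruction.
	 */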
	li	r0, 0
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
BEGIN_FTR_SECTION
	nap
FTR_SECTION_ELSE
	PPC_STOP
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
	b	.

33:	mr	r4, r3
	li	r3, 0
	li	r12, 0
	b	34f

kvm_end_cede:
	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMINTR
	bl	kvmhv_accumulate_time
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	bl	kvmppc_restore_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif

	/* load up FP state */
	bl	kvmppc_load_fp

	/* Restore guest decrementer */
	ld	r3, VCPU_DEC_EXPIRES(r4)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET(r5)
	add	r3, r3, r6	/* convert host TB to guest TB value */
	mftb	r7
	subf	r3, r7, r3
	mtspr	SPRN_DEC, r3

	/* Load NV GPRS */
	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called a
	 * C routine in kvmppc_check_wake_reason
	 *	r4 = VCPU
	 * r3 tells us whether we need to return to host or not
	 * WARNING: it gets checked further down:
	 * should not modify r3 until this check is done.
	 */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* clear our bit in vcore->napping_threads */
34:	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r7,HSTATE_PTID(r13)
	li	r0,1
	sld	r0,r0,r7
	addi	r6,r5,VCORE_NAPPING_THREADS
32:	lwarx	r7,0,r6
	andc	r7,r7,r0
	stwcx.	r7,0,r6
	bne	32b
	li	r0,0
	stb	r0,HSTATE_NAPPING(r13)

	/* See if the wake reason saved in r3 means we need to exit */
	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
	cmpdi	r3, 0
	bgt	guest_exit_cont

	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)
	cmpwi	r0,0x100
	bge	guest_exit_cont

	b	kvmppc_cede_reentry	/* if not go back to guest */

	/* cede when already previously prodded case */
kvm_cede_prodded:
	li	r0,0
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)
	li	r3,H_SUCCESS
	blr

	/* we've ceded but we want to give control to the host */
kvm_cede_exit:
	ld	r9, HSTATE_KVM_VCPU(r13)
	b	guest_exit_cont

	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	kvmppc_realmode_machine_check
	nop
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	/*
	 * For a guest that is FWNMI capable, deliver all the MCE errors
	 * (handled or unhandled) by exiting the guest with a KVM_EXIT_NMI
	 * exit reason.  This approach injects machine check errors into
	 * the guest address space, with additional information in the
	 * form of an RTAS event, enabling the guest kernel to handle
	 * such errors suitably.
	 *
	 * For a guest that is not FWNMI capable (old QEMU), fall back
	 * to the old behaviour for backward compatibility:
	 * Deliver unhandled/fatal (e.g. UE) MCE errors to the guest
	 * through a machine check interrupt (set HSRR0 to 0x200).
	 * For handled (non-fatal) errors, just go back to guest
	 * execution with the current HSRR0.
	 * If we receive a machine check with MSR(RI=0) then deliver it
	 * to the guest as a machine check, causing the guest to crash.
	 */
	ld	r11, VCPU_MSR(r9)
	rldicl.	r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */
	bne	mc_cont			/* if so, exit to host */
	/* Check if guest is capable of handling NMI exit */
	ld	r10, VCPU_KVM(r9)
	lbz	r10, KVM_FWNMI(r10)
	cmpdi	r10, 1			/* FWNMI capable? */
	beq	mc_cont			/* if so, exit with KVM_EXIT_NMI. */

	/* if not, fall through for backward compatibility. */
	andi.	r10, r11, MSR_RI	/* check for unrecoverable exception */
	beq	1f			/* Deliver a machine check to guest */
	ld	r10, VCPU_PC(r9)
	cmpdi	r3, 0		/* Did we handle MCE ? */
	bne	2f	/* Continue guest execution. */
	/* If not, deliver a machine check.  SRR0/1 are already set */
1:	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	bl	kvmppc_msr_interrupt
2:	b	fast_interrupt_c_return

/*
 * Check the reason we woke from nap, and take appropriate action.
 * Returns (in r3):
 *	0 if nothing needs to be done
 *	1 if something happened that needs to be handled by the host
 *	-1 if there was a guest wakeup (IPI or msgsnd)
 *	-2 if we handled a PCI passthrough interrupt (returned by
 *		kvmppc_read_intr only)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 * Modifies all volatile registers (since it may call a C function).
 * This routine calls kvmppc_read_intr, a C function, if an external
 * interrupt is pending.
 */
kvmppc_check_wake_reason:
	mfspr	r6, SPRN_SRR1
BEGIN_FTR_SECTION
	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
FTR_SECTION_ELSE
	rlwinm	r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 8			/* was it an external interrupt? */
	beq	7f			/* if so, see what it was */
	li	r3, 0
	li	r12, 0
	cmpwi	r6, 6			/* was it the decrementer? */
	beq	0f
BEGIN_FTR_SECTION
	cmpwi	r6, 5			/* privileged doorbell? */
	beq	0f
	cmpwi	r6, 3			/* hypervisor doorbell? */
	beq	3f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 0xa			/* Hypervisor maintenance ? */
	beq	4f
	li	r3, 1			/* anything else, return 1 */
0:	blr

	/* hypervisor doorbell */
3:	li	r12, BOOK3S_INTERRUPT_H_DOORBELL

	/*
	 * Clear the doorbell as we will invoke the handler
	 * explicitly in the guest exit path.
	 */
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
	/* see if it's a host IPI */
	li	r3, 1
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bnelr
	/* if not, return -1 */
	li	r3, -1
	blr

	/* Woken up due to Hypervisor maintenance interrupt */
4:	li	r12, BOOK3S_INTERRUPT_HMI
	li	r3, 1
	blr

	/* external interrupt - create a stack frame so we can call C */
7:	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -PPC_MIN_STKFRM(r1)
	bl	kvmppc_read_intr
	nop
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	cmpdi	r3, 1
	ble	1f

	/*
	 * A return code of 2 means a PCI passthrough interrupt, but
	 * we need to return to the host to complete handling the
	 * interrupt.  The guest exit code expects the trap reason
	 * in r12.
	 */
	li	r12, BOOK3S_INTERRUPT_HV_RM_HARD
1:
	ld	r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1)
	addi	r1, r1, PPC_MIN_STKFRM
	mtlr	r0
	blr

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_save_fp:
	mflr	r30
	mr	r31,r3
	mfmsr	r5
	ori	r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	addi	r3,r3,VCPU_FPRS
	bl	store_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	store_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r31)
	mtlr	r30
	blr

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_load_fp:
	mflr	r30
	mr	r31,r4
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	addi	r3,r4,VCPU_FPRS
	bl	load_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	load_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r31)
	mtspr	SPRN_VRSAVE,r7
	mtlr	r30
	mr	r4,r31
	blr

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save transactional state and TM-related registers.
 * Called with r9 pointing to the vcpu struct.
 * This can modify all checkpointed registers, but
 * restores r1, r2 and r9 (vcpu pointer) before exit.
 */
kvmppc_save_tm:
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)

	/* Turn on TM. */
	mfmsr	r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r8

	ld	r5, VCPU_MSR(r9)
	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
	beq	1f	/* TM not active in guest. */

	std	r1, HSTATE_HOST_R1(r13)
	li	r3, TM_CAUSE_KVM_RESCHED

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	/* All GPRs are volatile at this point. */
	TRECLAIM(R3)

	/* Temporarily store r13 and r9 so we have some regs to play with */
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r9, PACATMSCRATCH(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Get a few more GPRs free. */
	std	r29, VCPU_GPRS_TM(29)(r9)
	std	r30, VCPU_GPRS_TM(30)(r9)
	std	r31, VCPU_GPRS_TM(31)(r9)

	/* Save away PPR and DSCR soon so we don't run with user values. */
	mfspr	r31, SPRN_PPR
	HMT_MEDIUM
	mfspr	r30, SPRN_DSCR
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29

	/* Save all but r9, r13 & r29-r31 */
	reg = 0
	.rept	29
	.if (reg != 9) && (reg != 13)
	std	reg, VCPU_GPRS_TM(reg)(r9)
	.endif
	reg = reg + 1
	.endr
	/* ... now save r13 */
	GET_SCRATCH0(r4)
	std	r4, VCPU_GPRS_TM(13)(r9)
	/* ... and save r9 */
	ld	r4, PACATMSCRATCH(r13)
	std	r4, VCPU_GPRS_TM(9)(r9)

	/* Reload stack pointer and TOC. */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	/* Set MSR RI now we have r1 and r13 back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1

	/* Save away checkpointed SPRs. */
	std	r31, VCPU_PPR_TM(r9)
	std	r30, VCPU_DSCR_TM(r9)
	mflr	r5
	mfcr	r6
	mfctr	r7
	mfspr	r8, SPRN_AMR
	mfspr	r10, SPRN_TAR
	mfxer	r11
	std	r5, VCPU_LR_TM(r9)
	stw	r6, VCPU_CR_TM(r9)
	std	r7, VCPU_CTR_TM(r9)
	std	r8, VCPU_AMR_TM(r9)
	std	r10, VCPU_TAR_TM(r9)
	std	r11, VCPU_XER_TM(r9)

	/* Restore r12 as trap number. */
	lwz	r12, VCPU_TRAP(r9)

	/* Save FP/VSX. */
	addi	r3, r9, VCPU_FPRS_TM
	bl	store_fp_state
	addi	r3, r9, VCPU_VRS_TM
	bl	store_vr_state
	mfspr	r6, SPRN_VRSAVE
	stw	r6, VCPU_VRSAVE_TM(r9)
1:
	/*
	 * We need to save these SPRs after the treclaim so that the software
	 * error code is recorded correctly in the TEXASR.  Also the user may
	 * change these outside of a transaction, so they must always be
	 * context switched.
	 */
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	mfspr	r7, SPRN_TEXASR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)
	std	r7, VCPU_TEXASR(r9)

	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr

/*
 * Restore transactional state and TM-related registers.
 * Called with r4 pointing to the vcpu struct.
 * This potentially modifies all checkpointed registers.
 * It restores r1, r2, r4 from the PACA.
 */
kvmppc_restore_tm:
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)

	/* Turn on TM/FP/VSX/VMX so we can restore them. */
	mfmsr	r5
	li	r6, MSR_TM >> 32
	sldi	r6, r6, 32
	or	r5, r5, r6
	ori	r5, r5, MSR_FP
	oris	r5, r5, (MSR_VEC | MSR_VSX)@h
	mtmsrd	r5

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r4)
	ld	r6, VCPU_TFIAR(r4)
	ld	r7, VCPU_TEXASR(r4)
	mtspr	SPRN_TFHAR, r5
	mtspr	SPRN_TFIAR, r6
	mtspr	SPRN_TEXASR, r7

	ld	r5, VCPU_MSR(r4)
	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
	beqlr		/* TM not active in guest */
	std	r1, HSTATE_HOST_R1(r13)

	/* Make sure the failure summary is set, otherwise we'll program check
	 * when we trechkpt.  It's possible that this might not have been set
	 * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
	 * host.
	 */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7

	/*
	 * We need to load up the checkpointed state for the guest.
	 * We need to do this early as it will blow away any GPRs, VSRs and
	 * some SPRs.
	 */

	mr	r31, r4
	addi	r3, r31, VCPU_FPRS_TM
	bl	load_fp_state
	addi	r3, r31, VCPU_VRS_TM
	bl	load_vr_state
	mr	r4, r31
	lwz	r7, VCPU_VRSAVE_TM(r4)
	mtspr	SPRN_VRSAVE, r7

	ld	r5, VCPU_LR_TM(r4)
	lwz	r6, VCPU_CR_TM(r4)
	ld	r7, VCPU_CTR_TM(r4)
	ld	r8, VCPU_AMR_TM(r4)
	ld	r9, VCPU_TAR_TM(r4)
	ld	r10, VCPU_XER_TM(r4)
	mtlr	r5
	mtcr	r6
	mtctr	r7
	mtspr	SPRN_AMR, r8
	mtspr	SPRN_TAR, r9
	mtxer	r10

	/*
	 * Load up PPR and DSCR values but don't put them in the actual SPRs
	 * till the last moment to avoid running with userspace PPR and DSCR for
	 * too long.
	 */
	ld	r29, VCPU_DSCR_TM(r4)
	ld	r30, VCPU_PPR_TM(r4)

	std	r2, PACATMSCRATCH(r13) /* Save TOC */

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	/* Load GPRs r0-r28 */
	reg = 0
	.rept	29
	ld	reg, VCPU_GPRS_TM(reg)(r31)
	reg = reg + 1
	.endr

	mtspr	SPRN_DSCR, r29
	mtspr	SPRN_PPR, r30

	/* Load final GPRs */
	ld	29, VCPU_GPRS_TM(29)(r31)
	ld	30, VCPU_GPRS_TM(30)(r31)
	ld	31, VCPU_GPRS_TM(31)(r31)

	/* TM checkpointed state is now setup.  All GPRs are now volatile. */
	TRECHKPT

	/* Now let's get back the state we need. */
	HMT_MEDIUM
	GET_PACA(r13)
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATMSCRATCH(r13)

	/* Set the MSR RI since we have our registers back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1

	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr
#endif

/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * For now just spin, but we should do something better.
 */
kvmppc_bad_host_intr:
	b	.

/*
 * This mimics the MSR transition on IRQ delivery.  The new guest MSR is taken
 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
 *   r11 has the guest MSR value (in/out)
 *   r9 has a vcpu pointer (in)
 *   r0 is used as a scratch register
 */
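/*
 * Roughly (illustrative):
 *	ts = (msr >> MSR_TS_S_LG) & 3;	 // transaction-state field
 *	msr = vcpu->arch.intr_msr;
 *	if (ts == 2)			 // transactional ...
 *		ts = 1;			 // ... becomes suspended
 *	msr |= (unsigned long)ts << MSR_TS_S_LG;
 */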
kvmppc_msr_interrupt:
	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r0, 2 /* Check if we are in transactional state..  */
	ld	r11, VCPU_INTR_MSR(r9)
	bne	1f
	/* ... if transactional, change to suspended */
	li	r0, 1
1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	blr

/*
 * This works around a hardware bug on POWER8E processors, where
 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
 * performance monitor interrupt.  Instead, when we need to have
 * an interrupt pending, we have to arrange for a counter to overflow.
 */
kvmppc_fix_pmao:
	li	r3, 0
	mtspr	SPRN_MMCR2, r3
	lis	r3, (MMCR0_PMXE | MMCR0_FCECE)@h
	ori	r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
	mtspr	SPRN_MMCR0, r3
	lis	r3, 0x7fff
	ori	r3, r3, 0xffff
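	/*
	 * PMC6 = 0x7fffffff: the next event counted sets the sign bit,
	 * which (with PMXE set above) raises the pending interrupt.
	 */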
	mtspr	SPRN_PMC6, r3
	isync
	blr

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
/*
 * Start timing an activity
 * r3 = pointer to time accumulation struct, r4 = vcpu
 */
kvmhv_start_timing:
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, VCORE_IN_GUEST(r5)
	cmpwi	r6, 0
	beq	5f				/* if in guest, need to */
	ld	r6, VCORE_TB_OFFSET(r5)		/* subtract timebase offset */
5:	mftb	r5
	subf	r5, r6, r5
	std	r3, VCPU_CUR_ACTIVITY(r4)
	std	r5, VCPU_ACTIVITY_START(r4)
	blr

/*
 * Accumulate time to one activity and start another.
 * r3 = pointer to new time accumulation struct, r4 = vcpu
 */
kvmhv_accumulate_time:
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r8, VCORE_IN_GUEST(r5)
	cmpwi	r8, 0
	beq	4f				/* if in guest, need to */
	ld	r8, VCORE_TB_OFFSET(r5)		/* subtract timebase offset */
4:	ld	r5, VCPU_CUR_ACTIVITY(r4)
	ld	r6, VCPU_ACTIVITY_START(r4)
	std	r3, VCPU_CUR_ACTIVITY(r4)
	mftb	r7
	subf	r7, r8, r7
	std	r7, VCPU_ACTIVITY_START(r4)
	cmpdi	r5, 0
	beqlr
	subf	r3, r6, r7
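	/*
	 * Publish the update seqlock-style: bump the seqcount (odd =
	 * update in progress), lwsync, update total/min/max, lwsync,
	 * bump the seqcount again (even = stable).  A reader that sees
	 * an odd count, or a count that changed across its reads, must
	 * retry.
	 */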
	ld	r8, TAS_SEQCOUNT(r5)
	cmpdi	r8, 0
	addi	r8, r8, 1
	std	r8, TAS_SEQCOUNT(r5)
	lwsync
	ld	r7, TAS_TOTAL(r5)
	add	r7, r7, r3
	std	r7, TAS_TOTAL(r5)
	ld	r6, TAS_MIN(r5)
	ld	r7, TAS_MAX(r5)
	beq	3f
	cmpd	r3, r6
	bge	1f
3:	std	r3, TAS_MIN(r5)
1:	cmpd	r3, r7
	ble	2f
	std	r3, TAS_MAX(r5)
2:	lwsync
	addi	r8, r8, 1
	std	r8, TAS_SEQCOUNT(r5)
	blr
#endif