/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/mmu-hash64.h>
#include <asm/tm.h>

#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
	mfmsr	r10
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI
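/*
 * Illustrative C sketch of the transition above (not part of the
 * build): RFI loads MSR from SRR1 and jumps to SRR0, so
 *	srr0 = kvmppc_call_hv_entry;		// where RFI resumes
 *	srr1 = msr & ~(MSR_IR | MSR_DR);	// same state, MMU off
 *	msr &= ~MSR_RI;				// now non-recoverable
 */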

kvmppc_call_hv_entry:
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

BEGIN_FTR_SECTION
	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	cmpwi	r4, 0
	beq	23f			/* skip if not */
BEGIN_FTR_SECTION
	ld	r3, HSTATE_MMCR(r13)
	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r4, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC(r13)
	lwz	r4, HSTATE_PMC + 4(r13)
	lwz	r5, HSTATE_PMC + 8(r13)
	lwz	r6, HSTATE_PMC + 12(r13)
	lwz	r8, HSTATE_PMC + 16(r13)
	lwz	r9, HSTATE_PMC + 20(r13)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, HSTATE_MMCR(r13)
	ld	r4, HSTATE_MMCR + 8(r13)
	ld	r5, HSTATE_MMCR + 16(r13)
	ld	r6, HSTATE_MMCR + 24(r13)
	ld	r7, HSTATE_MMCR + 32(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_SIAR, r6
	mtspr	SPRN_SDAR, r7
BEGIN_FTR_SECTION
	ld	r8, HSTATE_MMCR + 40(r13)
	ld	r9, HSTATE_MMCR + 48(r13)
	mtspr	SPRN_MMCR2, r8
	mtspr	SPRN_SIER, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync
23:

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to absolute address 0x500 for
	 * external interrupts, or the machine_check_fwnmi label
	 * for machine checks (since firmware might have patched
	 * the vector area at 0x200).  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	ld	r7, HSTATE_HOST_MSR(r13)

	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	11f
	cmpwi	cr2, r12, BOOK3S_INTERRUPT_HMI
	beq	cr2, 14f			/* HMI check */

	/* RFI into the highmem handler, or branch to interrupt handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	beq	cr1, 13f		/* machine check */
	RFI

	/* On POWER7, we have external interrupts set to use HSRR0/1 */
11:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0x500

13:	b	machine_check_fwnmi

14:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	b	hmi_exception_after_realmode

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
1:	lwarx	r3, 0, r6
	or	r3, r3, r0
	stwcx.	r3, 0, r6
	bne	1b
	/* order napping_threads update vs testing entry_exit_count */
	isync
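/*
 * The lwarx/stwcx. loop above is, in illustrative C (hypothetical
 * compare_and_swap helper, not a kernel API):
 *	do {
 *		old = vcore->napping_threads;
 *	} while (!compare_and_swap(&vcore->napping_threads,
 *				   old, old | (1 << ptid)));
 * and the isync then orders the update against the entry_exit_count
 * test below.
 */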
	li	r12, 0
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	cmpwi	r7, 0x100
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)
	li	r3, 1
	stb	r3, HSTATE_HWTHREAD_REQ(r13)

	b	kvm_do_nap

kvm_novcpu_wakeup:
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason
	
	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	kvm_novcpu_exit

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
4:	lwarx	r7, 0, r6
	andc	r7, r7, r0
	stwcx.	r7, 0, r6
	bne	4b

	/* See if the wake reason means we need to exit */
	cmpdi	r3, 0
	bge	kvm_novcpu_exit

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	bne	kvmppc_got_guest

kvm_novcpu_exit:
	b	hdec_soon

/*
 * We come in here when wakened from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
kvm_start_guest:

	/* Set runlatch bit the minute you wake up from nap */
	mfspr	r1, SPRN_CTRLF
	ori 	r1, r1, 1
	mtspr	SPRN_CTRLT, r1

	ld	r2,PACATOC(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	beq	kvm_end_cede
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup

	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	cmpdi	r3, 0
	bge	kvm_no_guest

	/* get vcpu pointer, NULL if we have no vcpu to run */
	ld	r4,HSTATE_KVM_VCPU(r13)
	cmpdi	r4,0
	/* if we have no vcpu to run, go back to sleep */
	beq	kvm_no_guest

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR(r13)
	std	r6, HSTATE_DSCR(r13)

	bl	kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu pointer so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing
	 * the nap_count, because once the increment to nap_count is
	 * visible we could be given another vcpu.
	 */
	lwsync

	/* increment the nap count and then go to nap mode */
	ld	r4, HSTATE_KVM_VCORE(r13)
	addi	r4, r4, VCORE_NAP_COUNT
51:	lwarx	r3, 0, r4
	addi	r3, r3, 1
	stwcx.	r3, 0, r4
	bne	51b

kvm_no_guest:
	li	r0, KVM_HWTHREAD_IN_NAP
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
kvm_do_nap:
	/* Clear the runlatch bit before napping */
	mfspr	r2, SPRN_CTRLF
	clrrdi	r2, r2, 1
	mtspr	SPRN_CTRLT, r2

	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	isync
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.
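/*
 * The std/ptesync/ld/cmpd sequence above is the idiom for draining
 * outstanding stores before nap: the load of SCRATCH0 cannot complete
 * until the ptesync (and hence the store) has, and the always-equal
 * cmpd/bne forces the load to complete before the nap issues.
 */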

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer (or NULL)
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = TOC
	 * all other volatile GPRS = free
	 */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

	/* Clear out SLB */
	li	r6,0
	slbmte	r6,r6
	slbia
	ptesync

	/*
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Increment entry count iff exit count is zero. */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r9,r5,VCORE_ENTRY_EXIT
21:	lwarx	r3,0,r9
	cmpwi	r3,0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	addi	r3,r3,1
	stwcx.	r3,0,r9
	bne	21b
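/*
 * Equivalent logic in illustrative C (hypothetical compare_and_swap):
 *	do {
 *		count = vcore->entry_exit_count;
 *		if (count >= 0x100)		// high byte != 0: exiting
 *			goto secondary_too_late;
 *	} while (!compare_and_swap(&vcore->entry_exit_count,
 *				   count, count + 1));
 */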

	/* Primary thread switches to guest partition. */
	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r6,HSTATE_PTID(r13)
	cmpwi	r6,0
	bne	20f
	ld	r6,KVM_SDR1(r9)
	lwz	r7,KVM_LPID(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync

	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	add	r6,r6,r9
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
	li	r0,1
	sld	r0,r0,r7
	ld	r7,0(r6)
	and.	r7,r7,r0
	beq	22f
23:	ldarx	r7,0,r6			/* if set, clear the bit */
	andc	r7,r7,r0
	stdcx.	r7,0,r6
	bne	23b
	/* Flush the TLB of any entries for this LPID */
	/* use arch 2.07S as a proxy for POWER8 */
BEGIN_FTR_SECTION
	li	r6,512			/* POWER8 has 512 sets */
FTR_SECTION_ELSE
	li	r6,128			/* POWER7 has 128 sets */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	mtctr	r6
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
28:	tlbiel	r7
	addi	r7,r7,0x1000
	bdnz	28b
	ptesync
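/*
 * Sketch of the flush above: r7 carries IS = 0b10 ("invalidate a
 * congruence class for this LPID") in its low bits with the set index
 * above them; stepping by 0x1000 visits each of the 128 (P7) or 512
 * (P8) sets once, so the whole TLB for this LPID is flushed between
 * the two ptesyncs.
 */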

	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	37f
	mftb	r6		/* current host timebase */
	add	r8,r8,r6
	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
	mftb	r7		/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	37f
	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8
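/*
 * Arithmetic sketch for the update above: mtspr TBU40 writes only the
 * upper 40 bits of the timebase, so if the low 24 bits wrapped between
 * the mftb and the mtspr (new low 24 < old low 24), the carry into the
 * upper field was lost; addis r8,r8,0x100 adds 2^24, i.e. +1 in the
 * upper-40-bit field, and the second mtspr repairs it.
 */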

	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
	cmpdi	r7, 0
	beq	38f
	mtspr	SPRN_PCR, r7
38:

BEGIN_FTR_SECTION
	/* DPDES is shared between threads */
	ld	r8, VCORE_DPDES(r5)
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */
	b	10f

	/* Secondary threads wait for primary to have done partition switch */
20:	lbz	r0,VCORE_IN_GUEST(r5)
	cmpwi	r0,0
	beq	20b

	/* Set LPCR and RMOR. */
10:	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	ld	r8,KVM_RMOR(r9)
	mtspr	SPRN_RMOR,r8
	isync

	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,512		/* 1 microsecond */
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	blt	hdec_soon

	/* Do we have a guest vcpu to run? */
	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest
kvmppc_got_guest:

	/* Load up guest SLB entries */
	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:
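/*
 * Illustrative C for the loop above (field names hypothetical,
 * matching the VCPU_SLB_E/VCPU_SLB_V layout):
 *	for (i = 0; i < vcpu->arch.slb_max; i++)
 *		slbmte(slb[i].v, slb[i].e);	// reinstall guest SLB
 */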
	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	li	r6, LPPACA_YIELDCOUNT
	LWZX_BE	r5, r3, r6
	addi	r5, r5, 1
	STWX_BE	r5, r3, r6
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8

BEGIN_FTR_SECTION
	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
	isync
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	b	skip_tm
END_FTR_SECTION_IFCLR(CPU_FTR_TM)

	/* Turn on TM/FP/VSX/VMX so we can restore them. */
	mfmsr	r5
	li	r6, MSR_TM >> 32
	sldi	r6, r6, 32
	or	r5, r5, r6
	ori	r5, r5, MSR_FP
	oris	r5, r5, (MSR_VEC | MSR_VSX)@h
	mtmsrd	r5

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r4)
	ld	r6, VCPU_TFIAR(r4)
	ld	r7, VCPU_TEXASR(r4)
	mtspr	SPRN_TFHAR, r5
	mtspr	SPRN_TFIAR, r6
	mtspr	SPRN_TEXASR, r7

	ld	r5, VCPU_MSR(r4)
	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
	beq	skip_tm	/* TM not active in guest */

	/* Make sure the failure summary is set, otherwise we'll program check
	 * when we trechkpt.  It's possible that this might not have been set
	 * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
	 * host.
	 */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7

	/*
	 * We need to load up the checkpointed state for the guest.
	 * We need to do this early as it will blow away any GPRs, VSRs and
	 * some SPRs.
	 */

	mr	r31, r4
	addi	r3, r31, VCPU_FPRS_TM
	bl	load_fp_state
	addi	r3, r31, VCPU_VRS_TM
	bl	load_vr_state
	mr	r4, r31
	lwz	r7, VCPU_VRSAVE_TM(r4)
	mtspr	SPRN_VRSAVE, r7

	ld	r5, VCPU_LR_TM(r4)
	lwz	r6, VCPU_CR_TM(r4)
	ld	r7, VCPU_CTR_TM(r4)
	ld	r8, VCPU_AMR_TM(r4)
	ld	r9, VCPU_TAR_TM(r4)
	mtlr	r5
	mtcr	r6
	mtctr	r7
	mtspr	SPRN_AMR, r8
	mtspr	SPRN_TAR, r9

	/*
	 * Load up PPR and DSCR values but don't put them in the actual SPRs
	 * till the last moment to avoid running with userspace PPR and DSCR for
	 * too long.
	 */
	ld	r29, VCPU_DSCR_TM(r4)
	ld	r30, VCPU_PPR_TM(r4)

	std	r2, PACATMSCRATCH(r13) /* Save TOC */

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	/* Load GPRs r0-r28 */
	reg = 0
	.rept	29
	ld	reg, VCPU_GPRS_TM(reg)(r31)
	reg = reg + 1
	.endr

	mtspr	SPRN_DSCR, r29
	mtspr	SPRN_PPR, r30

	/* Load final GPRs */
	ld	29, VCPU_GPRS_TM(29)(r31)
	ld	30, VCPU_GPRS_TM(30)(r31)
	ld	31, VCPU_GPRS_TM(31)(r31)

	/* TM checkpointed state is now setup.  All GPRs are now volatile. */
	TRECHKPT

	/* Now let's get back the state we need. */
	HMT_MEDIUM
	GET_PACA(r13)
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATMSCRATCH(r13)

	/* Set the MSR RI since we have our registers back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1
skip_tm:
#endif

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
BEGIN_FTR_SECTION
	ld	r3, VCPU_MMCR(r4)
	andi.	r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r5, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r7, VCPU_SIAR(r4)
	ld	r8, VCPU_SDAR(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_SIAR, r7
	mtspr	SPRN_SDAR, r8
BEGIN_FTR_SECTION
	ld	r5, VCPU_MMCR + 24(r4)
	ld	r6, VCPU_SIER(r4)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
	mtspr	SPRN_MMCR2, r5
	mtspr	SPRN_SIER, r6
	mtspr	SPRN_SPMC1, r7
	mtspr	SPRN_SPMC2, r8
	mtspr	SPRN_MMCRS, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5

BEGIN_FTR_SECTION
	/* Skip next section on POWER7 */
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
	mfmsr	r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r8

	/* Load up POWER8-specific registers */
	ld	r5, VCPU_IAMR(r4)
	lwz	r6, VCPU_PSPB(r4)
	ld	r7, VCPU_FSCR(r4)
	mtspr	SPRN_IAMR, r5
	mtspr	SPRN_PSPB, r6
	mtspr	SPRN_FSCR, r7
	ld	r5, VCPU_DAWR(r4)
	ld	r6, VCPU_DAWRX(r4)
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_TAR(r4)
	mtspr	SPRN_DAWR, r5
	mtspr	SPRN_DAWRX, r6
	mtspr	SPRN_CIABR, r7
	mtspr	SPRN_TAR, r8
	ld	r5, VCPU_IC(r4)
	ld	r6, VCPU_VTB(r4)
	mtspr	SPRN_IC, r5
	mtspr	SPRN_VTB, r6
	ld	r8, VCPU_EBBHR(r4)
	mtspr	SPRN_EBBHR, r8
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r8, VCPU_TACR(r4)
	mtspr	SPRN_EBBRR, r5
	mtspr	SPRN_BESCR, r6
	mtspr	SPRN_CSIGR, r7
	mtspr	SPRN_TACR, r8
	ld	r5, VCPU_TCSCR(r4)
	ld	r6, VCPU_ACOP(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
	ld	r8, VCPU_WORT(r4)
	mtspr	SPRN_TCSCR, r5
	mtspr	SPRN_ACOP, r6
	mtspr	SPRN_PID, r7
	mtspr	SPRN_WORT, r8
8:

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET(r5)
	add	r8,r8,r6
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3
	stw	r3,VCPU_DEC(r4)

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	ld	r6, VCPU_CTR(r4)
	lwz	r7, VCPU_XER(r4)

	mtctr	r6
	mtxer	r7

kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

deliver_guest_interrupt:
	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME
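/*
 * The rldicl/rotldi pair above clears MSR_HV without a 64-bit mask
 * constant: rotate left so HV becomes the top bit, clear it with the
 * rldicl mask, then rotate back.  Net effect, in C:
 *	r11 = (vcpu->arch.msr & ~MSR_HV) | MSR_ME;
 */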

	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	cmpdi	cr1, r0, 0
	andi.	r8, r11, MSR_EE
	mfspr	r8, SPRN_LPCR
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
	mtspr	SPRN_LPCR, r8
	isync
	beq	5f
	li	r0, BOOK3S_INTERRUPT_EXTERNAL
	bne	cr1, 12f
	mfspr	r0, SPRN_DEC
	cmpwi	r0, 0
	li	r0, BOOK3S_INTERRUPT_DECREMENTER
	bge	5f

12:	mtspr	SPRN_SRR0, r10
	mr	r10,r0
	mtspr	SPRN_SRR1, r11
	mr	r9, r4
	bl	kvmppc_msr_interrupt
5:
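/*
 * Decision sketch for the block above (illustrative C):
 *	lpcr.MER = external_pending;	// HW asserts when guest sets EE
 *	if (msr & MSR_EE) {
 *		if (external_pending)
 *			deliver(BOOK3S_INTERRUPT_EXTERNAL);
 *		else if ((int)dec < 0)
 *			deliver(BOOK3S_INTERRUPT_DECREMENTER);
 *	}
 */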

/*
 * Required state:
 * R4 = vcpu
 * R10: value for HSRR0
 * R11: value for HSRR1
 * R13 = PACA
 */
fast_guest_return:
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	/* Enter guest */

BEGIN_FTR_SECTION
	ld	r5, VCPU_CFAR(r4)
	mtspr	SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r0, VCPU_PPR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)

BEGIN_FTR_SECTION
	mtspr	SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)

	hrfid
	b	.

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R12		= interrupt vector
	 * R13		= PACA
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std	r9, HSTATE_SCRATCH2(r13)

	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr
#endif
	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(R12)(r9)
	stw	r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(R13)(r9)
	std	r4, VCPU_LR(r9)

	stw	r12,VCPU_TRAP(r9)

	/* Save HEIR (HV emulation assist reg) in last_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
11:	stw	r3,VCPU_LAST_INST(r9)

	/* these are volatile across C function calls */
	mfctr	r3
	mfxer	r4
	std	r3, VCPU_CTR(r9)
	stw	r4, VCPU_XER(r9)

	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	kvmppc_hdsi
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq	kvmppc_hisi

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,0
	bge	ignore_hdec
2:
	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+	ext_interrupt_to_host

	/* External interrupt, first check for host_ipi. If this is
	 * set, we know the host wants us out so let's do it now
	 */
	bl	kvmppc_read_intr
	cmpdi	r3, 0
	bgt	ext_interrupt_to_host

	/* Check if any CPU is heading out to the host, if so head out too */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	ext_interrupt_to_host

	/* Return to guest after delivering any pending interrupt */
	mr	r4, r9
	b	deliver_guest_interrupt

ext_interrupt_to_host:

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save more register state  */
	mfdar	r6
	mfdsisr	r7
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	6f
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
mc_cont:

	/* Save guest CTRL register, set runlatch to 1 */
6:	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
	li	r5,0
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	stw	r5,VCPU_SLB_MAX(r9)

	/*
	 * Save the guest PURR/SPURR
	 */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4
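/*
 * Accounting sketch: delta = value at exit minus value at entry is
 * pure guest time; host PURR/SPURR resume from their saved values
 * plus that delta, so guest time is charged to the host totals.
 */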

	/* Save DEC */
	mfspr	r5,SPRN_DEC
	mftb	r6
	extsw	r5,r5
	add	r5,r5,r6
	/* r5 is a guest timebase value here, convert to host TB */
	ld	r3,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_TB_OFFSET(r3)
	subf	r5,r4,r5
	std	r5,VCPU_DEC_EXPIRES(r9)

BEGIN_FTR_SECTION
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Save POWER8-specific registers */
	mfspr	r5, SPRN_IAMR
	mfspr	r6, SPRN_PSPB
	mfspr	r7, SPRN_FSCR
	std	r5, VCPU_IAMR(r9)
	stw	r6, VCPU_PSPB(r9)
	std	r7, VCPU_FSCR(r9)
	mfspr	r5, SPRN_IC
	mfspr	r6, SPRN_VTB
	mfspr	r7, SPRN_TAR
	std	r5, VCPU_IC(r9)
	std	r6, VCPU_VTB(r9)
	std	r7, VCPU_TAR(r9)
	mfspr	r8, SPRN_EBBHR
	std	r8, VCPU_EBBHR(r9)
	mfspr	r5, SPRN_EBBRR
	mfspr	r6, SPRN_BESCR
	mfspr	r7, SPRN_CSIGR
	mfspr	r8, SPRN_TACR
	std	r5, VCPU_EBBRR(r9)
	std	r6, VCPU_BESCR(r9)
	std	r7, VCPU_CSIGR(r9)
	std	r8, VCPU_TACR(r9)
	mfspr	r5, SPRN_TCSCR
	mfspr	r6, SPRN_ACOP
	mfspr	r7, SPRN_PID
	mfspr	r8, SPRN_WORT
	std	r5, VCPU_TCSCR(r9)
	std	r6, VCPU_ACOP(r9)
	stw	r7, VCPU_GUEST_PID(r9)
	std	r8, VCPU_WORT(r9)
8:

	/* Save and reset AMR and UAMOR before turning on the MMU */
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	li	r6,0
	mtspr	SPRN_AMR,r6

	/* Switch DSCR back to host value */
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
	mtspr	SPRN_DSCR, r7

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* save FP state */
	mr	r3, r9
	bl	kvmppc_save_fp

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	b	2f
END_FTR_SECTION_IFCLR(CPU_FTR_TM)
	/* Turn on TM. */
	mfmsr	r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r8

	ld	r5, VCPU_MSR(r9)
	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
	beq	1f	/* TM not active in guest. */

	li	r3, TM_CAUSE_KVM_RESCHED

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	/* All GPRs are volatile at this point. */
	TRECLAIM(R3)

	/* Temporarily store r13 and r9 so we have some regs to play with */
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r9, PACATMSCRATCH(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Get a few more GPRs free. */
	std	r29, VCPU_GPRS_TM(29)(r9)
	std	r30, VCPU_GPRS_TM(30)(r9)
	std	r31, VCPU_GPRS_TM(31)(r9)

	/* Save away PPR and DSCR soon so we don't run with user values. */
	mfspr	r31, SPRN_PPR
	HMT_MEDIUM
	mfspr	r30, SPRN_DSCR
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29

	/* Save all but r9, r13 & r29-r31 */
	reg = 0
	.rept	29
	.if (reg != 9) && (reg != 13)
	std	reg, VCPU_GPRS_TM(reg)(r9)
	.endif
	reg = reg + 1
	.endr
	/* ... now save r13 */
	GET_SCRATCH0(r4)
	std	r4, VCPU_GPRS_TM(13)(r9)
	/* ... and save r9 */
	ld	r4, PACATMSCRATCH(r13)
	std	r4, VCPU_GPRS_TM(9)(r9)

	/* Reload stack pointer and TOC. */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	/* Set MSR RI now we have r1 and r13 back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1

	/* Save away checkpointed SPRs. */
	std	r31, VCPU_PPR_TM(r9)
	std	r30, VCPU_DSCR_TM(r9)
	mflr	r5
	mfcr	r6
	mfctr	r7
	mfspr	r8, SPRN_AMR
	mfspr	r10, SPRN_TAR
	std	r5, VCPU_LR_TM(r9)
	stw	r6, VCPU_CR_TM(r9)
	std	r7, VCPU_CTR_TM(r9)
	std	r8, VCPU_AMR_TM(r9)
	std	r10, VCPU_TAR_TM(r9)

	/* Restore r12 as trap number. */
	lwz	r12, VCPU_TRAP(r9)

	/* Save FP/VSX. */
	addi	r3, r9, VCPU_FPRS_TM
	bl	store_fp_state
	addi	r3, r9, VCPU_VRS_TM
	bl	store_vr_state
	mfspr	r6, SPRN_VRSAVE
	stw	r6, VCPU_VRSAVE_TM(r9)
1:
	/*
	 * We need to save these SPRs after the treclaim so that the software
	 * error code is recorded correctly in the TEXASR.  Also the user may
	 * change these outside of a transaction, so they must always be
	 * context switched.
	 */
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	mfspr	r7, SPRN_TEXASR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)
	std	r7, VCPU_TEXASR(r9)
2:
#endif

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
	beq	25f
	li	r4, LPPACA_YIELDCOUNT
	LWZX_BE	r3, r8, r4
	addi	r3, r3, 1
	STWX_BE	r3, r8, r4
	li	r3, 1
	stb	r3, VCPU_VPA_DIRTY(r9)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
BEGIN_FTR_SECTION
	/*
	 * POWER8 seems to have a hardware bug where setting
	 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
	 * when some counters are already negative doesn't seem
	 * to cause a performance monitor alert (and hence interrupt).
	 * The effect of this is that when saving the PMU state,
	 * if there is no PMU alert pending when we read MMCR0
	 * before freezing the counters, but one becomes pending
	 * before we read the counters, we lose it.
	 * To work around this, we need a way to freeze the counters
	 * before reading MMCR0.  Normally, freezing the counters
	 * is done by writing MMCR0 (to set MMCR0[FC]) which
	 * unavoidably writes MMCR0[PMAO] as well.  On POWER8,
	 * we can also freeze the counters using MMCR2, by writing
	 * 1s to all the counter freeze condition bits (there are
	 * 9 bits each for 6 counters).
	 */
	li	r3, -1			/* set all freeze bits */
	clrrdi	r3, r3, 10
	mfspr	r10, SPRN_MMCR2
	mtspr	SPRN_MMCR2, r3
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
	/* Clear MMCRA in order to disable SDAR updates */
	li	r7, 0
	mtspr	SPRN_MMCRA, r7
	isync
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	mfspr	r7, SPRN_SIAR
	mfspr	r8, SPRN_SDAR
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
BEGIN_FTR_SECTION
	std	r10, VCPU_MMCR + 24(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_SIER
	mfspr	r6, SPRN_SPMC1
	mfspr	r7, SPRN_SPMC2
	mfspr	r8, SPRN_MMCRS
	std	r5, VCPU_SIER(r9)
	stw	r6, VCPU_PMC + 24(r9)
	stw	r7, VCPU_PMC + 28(r9)
	std	r8, VCPU_MMCR + 32(r9)
	lis	r4, 0x8000
	mtspr	SPRN_MMCRS, r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
22:
	/* Clear out SLB */
	li	r5,0
	slbmte	r5,r5
	slbia
	ptesync

hdec_soon:			/* r12 = trap, r13 = paca */
	/*
	 * POWER7/POWER8 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
	/* Increment the threads-exiting-guest count in the 0xff00
	   bits of vcore->entry_exit_count */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r6,r5,VCORE_ENTRY_EXIT
41:	lwarx	r3,0,r6
	addi	r0,r3,0x100
	stwcx.	r0,0,r6
	bne	41b
	isync		/* order stwcx. vs. reading napping_threads */

	/*
	 * At this point we have an interrupt that we have to pass
	 * up to the kernel or qemu; we can't handle it in real mode.
	 * Thus we have to do a partition switch, so we have to
	 * collect the other threads, if we are the first thread
	 * to take an interrupt.  To do this, we set the HDEC to 0,
	 * which causes an HDEC interrupt in all threads within 2ns
	 * because the HDEC register is shared between all 4 threads.
	 * However, we don't need to bother if this is an HDEC
	 * interrupt, since the other threads will already be on their
	 * way here in that case.
	 */
	cmpwi	r3,0x100	/* Are we the first here? */
	bge	43f
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	beq	40f
	li	r0,0
	mtspr	SPRN_HDEC,r0
40:
	/*
	 * Send an IPI to any napping threads, since an HDEC interrupt
	 * doesn't wake CPUs up from nap.
	 */
	lwz	r3,VCORE_NAPPING_THREADS(r5)
	lbz	r4,HSTATE_PTID(r13)
	li	r0,1
	sld	r0,r0,r4
	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */
	beq	43f
	/* Order entry/exit update vs. IPIs */
	sync
	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */
	subf	r6,r4,r13
42:	andi.	r0,r3,1
	beq	44f
	ld	r8,HSTATE_XICS_PHYS(r6)	/* get thread's XICS reg addr */
	li	r0,IPI_PRIORITY
	li	r7,XICS_MFRR
	stbcix	r0,r7,r8		/* trigger the IPI */
44:	srdi.	r3,r3,1
	addi	r6,r6,PACA_SIZE
	bne	42b

secondary_too_late:
	/* Secondary threads wait for primary to do partition switch */
43:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r3,HSTATE_PTID(r13)
	cmpwi	r3,0
	beq	15f
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	b	16f

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	srwi	r0,r3,8
	clrldi	r3,r3,56
	cmpw	r3,r0
	bne	15b
	isync
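/*
 * Wait-loop sketch in C: the exit count lives in bits 8-15 and the
 * entry count in bits 0-7 of entry_exit_count, so the primary spins:
 *	do {
 *		ee = vcore->entry_exit_count;
 *	} while ((ee >> 8) != (ee & 0xff));	// all entered have exited
 */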

	/* Primary thread switches back to host partition */
	ld	r6,KVM_HOST_SDR1(r4)
	lwz	r7,KVM_HOST_LPID(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync

BEGIN_FTR_SECTION
	/* DPDES is shared between threads */
	mfspr	r7, SPRN_DPDES
	std	r7, VCORE_DPDES(r5)
	/* clear DPDES so we don't get guest doorbells in the host */
	li	r8, 0
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Subtract timebase offset from timebase */
	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	17f
	mftb	r6			/* current guest timebase */
	subf	r8,r8,r6
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	17f
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

	/* Reset PCR */
17:	ld	r0, VCORE_PCR(r5)
	cmpdi	r0, 0
	beq	18f
	li	r0, 0
	mtspr	SPRN_PCR, r0
18:
	/* Signal secondary CPUs to continue */
	stb	r0,VCORE_IN_GUEST(r5)
	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

16:	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync

	/* load host SLB entries */
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr

	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	ld	r0, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	mtlr	r0
	blr

/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
kvmppc_hdsi:
	mfspr	r4, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	beq	3f
	clrrdi	r0, r4, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	6f
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont
	cmpdi	r3, -2			/* MMIO emulation; need instr word */
	beq	2f

	/* Synthesize a DSI for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
	mr	r6, r3
1:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_DSISR, r6
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_DATA_STORAGE
	bl	kvmppc_msr_interrupt
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
	lwz	r8, VCPU_XER(r9)
	mtctr	r7
	mtxer	r8
	mr	r4, r9
	b	fast_guest_return

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)
	b	4b

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	mfmsr	r3
	ori	r4, r3, MSR_DR		/* Enable paging for data */
	mtmsrd	r4
	lwz	r8, 0(r10)
	mtmsrd	r3

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)
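/*
 * Illustrative C for the guarded fetch above:
 *	inst = KVM_INST_FETCH_FAILED;
 *	hstate->in_guest = KVM_GUEST_MODE_SKIP;	// a fault just skips
 *	mtmsrd(msr | MSR_DR);			// data relocation on
 *	inst = *(u32 *)guest_pc;		// may fault harmlessly
 *	mtmsrd(msr);
 */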

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)
	b	guest_exit_cont

/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
kvmppc_hisi:
	andis.	r0, r11, SRR1_ISI_NOPT@h
	beq	1f
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	beq	3f
	clrrdi	r0, r10, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
4:
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	mr	r4, r10
	mr	r6, r11
	li	r7, 0			/* instruction fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont

	/* Synthesize an ISI for the guest */
	mr	r11, r3
1:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_INST_STORAGE
	bl	kvmppc_msr_interrupt
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
	b	4b

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 */
	.globl	hcall_try_real_mode
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	andi.	r0,r11,MSR_PR
	/* sc 1 from userspace - reflect to guest syscall */
	bne	sc_1_fast_return
	clrrdi	r3,r3,2
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	bge	guest_exit_cont
	/* See if this hcall is enabled for in-kernel handling */
	ld	r4, VCPU_KVM(r9)
	srdi	r0, r3, 8	/* r0 = (r3 / 4) >> 6 */
	sldi	r0, r0, 3	/* index into kvm->arch.enabled_hcalls[] */
	add	r4, r4, r0
	ld	r0, KVM_ENABLED_HCALLS(r4)
	rlwinm	r4, r3, 32-2, 0x3f	/* r4 = (r3 / 4) & 0x3f */
	srd	r0, r0, r4
	andi.	r0, r0, 1
	beq	guest_exit_cont
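/*
 * Illustrative C for the bitmap test above (hcall numbers are
 * multiples of 4):
 *	n = hcall_nr / 4;
 *	if (!((kvm->arch.enabled_hcalls[n / 64] >> (n % 64)) & 1))
 *		goto guest_exit_cont;
 */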
	/* Get pointer to handler, if any, and call it */
	LOAD_REG_ADDR(r4, hcall_real_table)
	lwax	r3,r3,r4
	cmpwi	r3,0
	beq	guest_exit_cont
	add	r12,r3,r4
	mtctr	r12
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	bctrl
	cmpdi	r3,H_TOO_HARD
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)
	ld	r10,VCPU_PC(r4)
	ld	r11,VCPU_MSR(r4)
	b	fast_guest_return

sc_1_fast_return:
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	li	r10, BOOK3S_INTERRUPT_SYSCALL
	bl	kvmppc_msr_interrupt
	mr	r4,r9
	b	fast_guest_return

	/* We've attempted a real mode hcall, but it has been punted back
	 * to userspace.  We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)

	b	guest_exit_cont

	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	DOTSYM(kvmppc_h_remove) - hcall_real_table
	.long	DOTSYM(kvmppc_h_enter) - hcall_real_table
	.long	DOTSYM(kvmppc_h_read) - hcall_real_table
	.long	0		/* 0x10 - H_CLEAR_MOD */
	.long	0		/* 0x14 - H_CLEAR_REF */
	.long	DOTSYM(kvmppc_h_protect) - hcall_real_table
	.long	DOTSYM(kvmppc_h_get_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_h_put_tce) - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
	.long	0		/* 0x2c */
	.long	0		/* 0x30 */
	.long	0		/* 0x34 */
	.long	0		/* 0x38 */
	.long	0		/* 0x3c */
	.long	0		/* 0x40 */
	.long	0		/* 0x44 */
	.long	0		/* 0x48 */
	.long	0		/* 0x4c */
	.long	0		/* 0x50 */
	.long	0		/* 0x54 */
	.long	0		/* 0x58 */
	.long	0		/* 0x5c */
	.long	0		/* 0x60 */
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
	.long	0		/* 0x70 - H_IPOLL */
	.long	DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
#else
	.long	0		/* 0x64 - H_EOI */
	.long	0		/* 0x68 - H_CPPR */
	.long	0		/* 0x6c - H_IPI */
	.long	0		/* 0x70 - H_IPOLL */
	.long	0		/* 0x74 - H_XIRR */
#endif
	.long	0		/* 0x78 */
	.long	0		/* 0x7c */
	.long	0		/* 0x80 */
	.long	0		/* 0x84 */
	.long	0		/* 0x88 */
	.long	0		/* 0x8c */
	.long	0		/* 0x90 */
	.long	0		/* 0x94 */
	.long	0		/* 0x98 */
	.long	0		/* 0x9c */
	.long	0		/* 0xa0 */
	.long	0		/* 0xa4 */
	.long	0		/* 0xa8 */
	.long	0		/* 0xac */
	.long	0		/* 0xb0 */
	.long	0		/* 0xb4 */
	.long	0		/* 0xb8 */
	.long	0		/* 0xbc */
	.long	0		/* 0xc0 */
	.long	0		/* 0xc4 */
	.long	0		/* 0xc8 */
	.long	0		/* 0xcc */
	.long	0		/* 0xd0 */
	.long	0		/* 0xd4 */
	.long	0		/* 0xd8 */
	.long	0		/* 0xdc */
	.long	DOTSYM(kvmppc_h_cede) - hcall_real_table
	.long	0		/* 0xe4 */
	.long	0		/* 0xe8 */
	.long	0		/* 0xec */
	.long	0		/* 0xf0 */
	.long	0		/* 0xf4 */
	.long	0		/* 0xf8 */
	.long	0		/* 0xfc */
	.long	0		/* 0x100 */
	.long	0		/* 0x104 */
	.long	0		/* 0x108 */
	.long	0		/* 0x10c */
	.long	0		/* 0x110 */
	.long	0		/* 0x114 */
	.long	0		/* 0x118 */
	.long	0		/* 0x11c */
	.long	0		/* 0x120 */
	.long	DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
	.long	0		/* 0x128 */
	.long	0		/* 0x12c */
	.long	0		/* 0x130 */
	.long	DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
	.globl	hcall_real_table_end
hcall_real_table_end:

ignore_hdec:
	mr	r4,r9
	b	fast_guest_return

_GLOBAL(kvmppc_h_set_xdabr)
	andi.	r0, r5, DABRX_USER | DABRX_KERNEL
	beq	6f
	li	r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
	andc.	r0, r5, r0
	beq	3f
6:	li	r3, H_PARAMETER
	blr

_GLOBAL(kvmppc_h_set_dabr)
	li	r5, DABRX_USER | DABRX_KERNEL
3:
BEGIN_FTR_SECTION
	b	2f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r4,VCPU_DABR(r3)
	stw	r5, VCPU_DABRX(r3)
	mtspr	SPRN_DABRX, r5
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4
	mfspr	r5, SPRN_DABR
	cmpd	r4, r5
	bne	1b
	isync
	li	r3,0
	blr

	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
2:	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
	rlwimi	r5, r4, 1, DAWRX_WT
	clrrdi	r4, r4, 3
	std	r4, VCPU_DAWR(r3)
	std	r5, VCPU_DAWRX(r3)
	mtspr	SPRN_DAWR, r4
	mtspr	SPRN_DAWRX, r5
	li	r3, 0
	blr

_GLOBAL(kvmppc_h_cede)
	ori	r11,r11,MSR_EE
	std	r11,VCPU_MSR(r3)
	li	r0,1
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	cmpwi	r5,0
	bne	kvm_cede_prodded
	li	r0,0		/* set trap to 0 to say hcall is handled */
	stw	r0,VCPU_TRAP(r3)
	li	r0,H_SUCCESS
	std	r0,VCPU_GPR(R3)(r3)

	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r6,HSTATE_PTID(r13)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	clrldi	r8,r8,56
	li	r0,1
	sld	r0,r0,r6
	addi	r6,r5,VCORE_NAPPING_THREADS
31:	lwarx	r4,0,r6
	or	r4,r4,r0
	PPC_POPCNTW(R7,R4)
	cmpw	r7,r8
	bge	kvm_cede_exit
	stwcx.	r4,0,r6
	bne	31b
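/*
 * Illustrative C for the loop above (hypothetical helpers):
 *	do {
 *		old = vcore->napping_threads;
 *		new = old | my_bit;
 *		if (popcount(new) >= threads_entered)
 *			goto kvm_cede_exit;	// last thread awake
 *	} while (!compare_and_swap(&vcore->napping_threads, old, new));
 */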
	/* order napping_threads update vs testing entry_exit_count */
	isync
	li	r0,NAPPING_CEDE
	stb	r0,HSTATE_NAPPING(r13)
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	cmpwi	r7,0x100
	bge	33f		/* another thread already exiting */

/*
 * Although not specifically required by the architecture, POWER7
 * preserves the following registers in nap mode, even if an SMT mode
 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
 */
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)

	/* save FP state */
	bl	kvmppc_save_fp

	/*
	 * Take a nap until a decrementer, external, or doorbell interrupt
	 * occurs, with PECE1, PECE0 and PECEDP set in LPCR. Also clear the
	 * runlatch bit before napping.
	 */
	mfspr	r2, SPRN_CTRLF
	clrrdi	r2, r2, 1
	mtspr	SPRN_CTRLT, r2

	li	r0,1
	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	mfspr	r5,SPRN_LPCR
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
BEGIN_FTR_SECTION
	oris	r5,r5,LPCR_PECEDP@h
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_LPCR,r5
	isync
	li	r0, 0
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.

33:	mr	r4, r3
	li	r3, 0
	li	r12, 0
	b	34f

kvm_end_cede:
	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)

	/* load up FP state */
	bl	kvmppc_load_fp

	/* Load NV GPRS */
	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)
 
	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason

	/* clear our bit in vcore->napping_threads */
34:	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r7,HSTATE_PTID(r13)
	li	r0,1
	sld	r0,r0,r7
	addi	r6,r5,VCORE_NAPPING_THREADS
32:	lwarx	r7,0,r6
	andc	r7,r7,r0
	stwcx.	r7,0,r6
	bne	32b
	li	r0,0
	stb	r0,HSTATE_NAPPING(r13)

	/* See if the wake reason means we need to exit */
	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
	cmpdi	r3, 0
	bgt	guest_exit_cont

	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)
	cmpwi	r0,0x100
	bge	guest_exit_cont

	b	kvmppc_cede_reentry	/* if not go back to guest */

	/* cede when already previously prodded case */
kvm_cede_prodded:
	li	r0,0
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)
	li	r3,H_SUCCESS
	blr

	/* we've ceded but we want to give control to the host */
kvm_cede_exit:
	b	hcall_real_fallback

	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	kvmppc_realmode_machine_check
	nop
	cmpdi	r3, 0		/* Did we handle MCE ? */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	/*
	 * Deliver unhandled/fatal (e.g. UE) MCE errors to the guest through
	 * a machine check interrupt (set HSRR0 to 0x200).  For handled
	 * (non-fatal) errors, just go back to guest execution with the
	 * current HSRR0 instead of exiting the guest.  This approach
	 * injects a machine check into the guest for a fatal error,
	 * causing the guest to crash.
	 *
	 * The old code used to return to the host for unhandled errors,
	 * which caused the guest to hang with soft lockups and made it
	 * difficult to recover the guest instance.
	 */
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	bne	2f	/* Continue guest execution. */
	/* If not, deliver a machine check.  SRR0/1 are already set */
	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	ld	r11, VCPU_MSR(r9)
	bl	kvmppc_msr_interrupt
2:	b	fast_interrupt_c_return

/*
 * Check the reason we woke from nap, and take appropriate action.
 * Returns:
 *	0 if nothing needs to be done
 *	1 if something happened that needs to be handled by the host
 *	-1 if there was a guest wakeup (IPI)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 */
kvmppc_check_wake_reason:
	mfspr	r6, SPRN_SRR1
BEGIN_FTR_SECTION
	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
FTR_SECTION_ELSE
	rlwinm	r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 8			/* was it an external interrupt? */
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	kvmppc_read_intr	/* if so, see what it was */
	li	r3, 0
	li	r12, 0
	cmpwi	r6, 6			/* was it the decrementer? */
	beq	0f
BEGIN_FTR_SECTION
	cmpwi	r6, 5			/* privileged doorbell? */
	beq	0f
	cmpwi	r6, 3			/* hypervisor doorbell? */
	beq	3f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	li	r3, 1			/* anything else, return 1 */
0:	blr

	/* hypervisor doorbell */
3:	li	r12, BOOK3S_INTERRUPT_H_DOORBELL
	li	r3, 1
	blr

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 */
kvmppc_read_intr:
	/* see if a host IPI is pending */
	li	r3, 1
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bne	1f

	/* Now read the interrupt from the ICP */
	ld	r6, HSTATE_XICS_PHYS(r13)
	li	r7, XICS_XIRR
	cmpdi	r6, 0
	beq-	1f
	lwzcix	r0, r6, r7
	/*
	 * Save XIRR for later. Since we get it in reverse endian on LE
	 * systems, save it byte reversed and fetch it back in host endian.
	 */
	li	r3, HSTATE_SAVED_XIRR
	STWX_BE	r0, r3, r13
#ifdef __LITTLE_ENDIAN__
	lwz	r3, HSTATE_SAVED_XIRR(r13)
#else
	mr	r3, r0
#endif
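	/* mask off the CPPR in the top byte; the low 24 bits are the XISR */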
	rlwinm.	r3, r3, 0, 0xffffff
	sync
	beq	1f			/* if nothing pending in the ICP */

	/* We found something in the ICP...
	 *
	 * If it's not an IPI, stash it in the PACA and return to
	 * the host; we don't (yet) handle directing real external
	 * interrupts directly to the guest.
	 */
	cmpwi	r3, XICS_IPI		/* if there is, is it an IPI? */
	bne	42f

	/* It's an IPI, clear the MFRR and EOI it */
	li	r3, 0xff
	li	r8, XICS_MFRR
	stbcix	r3, r6, r8		/* clear the IPI */
	stwcix	r0, r6, r7		/* EOI it */
	sync

	/* We need to re-check host IPI now in case it got set in the
	 * meantime. If it's clear, we bounce the interrupt to the
	 * guest.
	 */
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bne-	43f

	/* OK, it's an IPI for us */
	li	r3, -1
1:	blr

42:	/* It's not an IPI and it's for the host. We saved a copy of XIRR in
	 * the PACA earlier; it will be picked up by the host ICP driver.
	 */
	li	r3, 1
	b	1b

43:	/* We raced with the host; we need to resend that IPI, bummer */
	li	r0, IPI_PRIORITY
	stbcix	r0, r6, r8		/* set the IPI */
	sync
	li	r3, 1
	b	1b

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_save_fp:
	mflr	r30
	mr	r31,r3
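	/* enable FP, plus VMX/VSX where available, so the state is accessible */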
	mfmsr	r5
	ori	r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	addi	r3,r3,VCPU_FPRS
	bl	store_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	store_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r31)
	mtlr	r30
	blr

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_load_fp:
	mflr	r30
	mr	r31,r4
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	addi	r3,r4,VCPU_FPRS
	bl	load_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	load_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r31)
	mtspr	SPRN_VRSAVE,r7
	mtlr	r30
	mr	r4,r31
	blr

/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * For now just spin, but we should do something better.
 */
kvmppc_bad_host_intr:
	b	.

/*
 * This mimics the MSR transition on IRQ delivery.  The new guest MSR is taken
 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
 *   r11 has the guest MSR value (in/out)
 *   r9 has a vcpu pointer (in)
 *   r0 is used as a scratch register
 */
kvmppc_msr_interrupt:
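	/* MSR[TS] encoding: 0b10 = transactional, 0b01 = suspended */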
	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r0, 2 /* Check if we are in transactional state... */
	ld	r11, VCPU_INTR_MSR(r9)
	bne	1f
	/* ... if transactional, change to suspended */
	li	r0, 1
1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	blr

/*
 * This works around a hardware bug on POWER8E processors, where
 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
 * performance monitor interrupt.  Instead, when we need to have
 * an interrupt pending, we have to arrange for a counter to overflow.
 */
kvmppc_fix_pmao:
	li	r3, 0
	mtspr	SPRN_MMCR2, r3
	lis	r3, (MMCR0_PMXE | MMCR0_FCECE)@h
	ori	r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
	mtspr	SPRN_MMCR0, r3
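	/* set PMC6 one count short of overflow; the next event rolls it
	 * over and generates the pending interrupt */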
	lis	r3, 0x7fff
	ori	r3, r3, 0xffff
	mtspr	SPRN_PMC6, r3
	isync
	blr