/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Derived from book3s_hv_rmhandlers.S, which is:
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 */

#include <asm/reg.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/export.h>
#include <asm/tm.h>
#include <asm/cputable.h>

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
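/*
 * Offset of checkpointed GPR 'reg' within the vcpu struct: the checkpointed
 * GPRs are laid out as consecutive ULONG_SIZE slots starting at VCPU_GPR_TM.
 */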
#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)

/*
 * Save transactional state and TM-related registers.
 * Called with:
 * - r3 pointing to the vcpu struct
 * - r4 containing the MSR with current TS bits:
 *	(for HV KVM, this is VCPU_MSR; for PR KVM, it is the host MSR)
 * This can modify all checkpointed registers, but
 * restores r1 and r2 before exit.
 */
_GLOBAL(__kvmppc_save_tm)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)

	/* Turn on TM. */
	mfmsr	r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
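	/*
	 * The rldimi above inserts the 1 from r0 at bit MSR_TM_LG,
	 * setting MSR[TM] without disturbing the rest of r8.
	 */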
	ori     r8, r8, MSR_FP
	oris    r8, r8, (MSR_VEC | MSR_VSX)@h
	mtmsrd	r8

	rldicl. r4, r4, 64 - MSR_TS_S_LG, 62
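	/*
	 * r4 now holds only the two MSR[TS] bits; the record form set CR0,
	 * so the beq below tests for "no transaction active".
	 */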
	beq	1f	/* TM not active in guest. */

	std	r1, HSTATE_SCRATCH2(r13)
	std	r3, HSTATE_SCRATCH1(r13)
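	/*
	 * treclaim will replace all GPRs with their checkpointed values,
	 * so the host stack pointer and vcpu pointer must be stashed in
	 * memory (PACA scratch fields) to survive it.
	 */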

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
BEGIN_FTR_SECTION
	/* Emulation of the treclaim instruction needs TEXASR before treclaim */
	mfspr	r6, SPRN_TEXASR
	std	r6, VCPU_ORIG_TEXASR(r3)
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
#endif

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	li	r3, TM_CAUSE_KVM_RESCHED

	/* All GPRs are volatile at this point. */
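	/*
	 * treclaim records the failure cause from r3 in TEXASR and makes
	 * the checkpointed (pre-transactional) register values current.
	 */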
	TRECLAIM(R3)

	/* Temporarily store r13 and r9 so we have some regs to play with */
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r9, PACATMSCRATCH(r13)
	ld	r9, HSTATE_SCRATCH1(r13)

	/* Get a few more GPRs free. */
	std	r29, VCPU_GPRS_TM(29)(r9)
	std	r30, VCPU_GPRS_TM(30)(r9)
	std	r31, VCPU_GPRS_TM(31)(r9)

	/* Save away PPR and DSCR soon so we don't run with user values. */
	mfspr	r31, SPRN_PPR
	HMT_MEDIUM
	mfspr	r30, SPRN_DSCR
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29
#endif

	/* Save all but r9, r13 & r29-r31 */
	reg = 0
	.rept	29
	.if (reg != 9) && (reg != 13)
	std	reg, VCPU_GPRS_TM(reg)(r9)
	.endif
	reg = reg + 1
	.endr
	/* ... now save r13 */
	GET_SCRATCH0(r4)
	std	r4, VCPU_GPRS_TM(13)(r9)
	/* ... and save r9 */
	ld	r4, PACATMSCRATCH(r13)
	std	r4, VCPU_GPRS_TM(9)(r9)

	/* Reload stack pointer and TOC. */
	ld	r1, HSTATE_SCRATCH2(r13)
	ld	r2, PACATOC(r13)

	/* Set MSR RI now we have r1 and r13 back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1

	/* Save away checkpointed SPRs. */
	std	r31, VCPU_PPR_TM(r9)
	std	r30, VCPU_DSCR_TM(r9)
	mflr	r5
	mfcr	r6
	mfctr	r7
	mfspr	r8, SPRN_AMR
	mfspr	r10, SPRN_TAR
	mfxer	r11
	std	r5, VCPU_LR_TM(r9)
	stw	r6, VCPU_CR_TM(r9)
	std	r7, VCPU_CTR_TM(r9)
	std	r8, VCPU_AMR_TM(r9)
	std	r10, VCPU_TAR_TM(r9)
	std	r11, VCPU_XER_TM(r9)

	/* Restore r12 as trap number. */
	lwz	r12, VCPU_TRAP(r9)

	/* Save FP/VSX. */
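	/*
	 * store_fp_state/store_vr_state save the checkpointed FP/VSX and
	 * Altivec register sets to the buffers passed in r3.
	 */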
	addi	r3, r9, VCPU_FPRS_TM
	bl	store_fp_state
	addi	r3, r9, VCPU_VRS_TM
	bl	store_vr_state
	mfspr	r6, SPRN_VRSAVE
	stw	r6, VCPU_VRSAVE_TM(r9)
1:
	/*
	 * We need to save these SPRs after the treclaim so that the software
	 * error code is recorded correctly in the TEXASR.  Also the user may
	 * change these outside of a transaction, so they must always be
	 * context switched.
	 */
	mfspr	r7, SPRN_TEXASR
	std	r7, VCPU_TEXASR(r9)
11:
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)

	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr

/*
 * _kvmppc_save_tm_pr() is a wrapper around __kvmppc_save_tm(), so that it
 * can be called from C code. It is used by PR KVM only.
 */
_GLOBAL(_kvmppc_save_tm_pr)
	mflr	r5
	std	r5, PPC_LR_STKOFF(r1)
	stdu    r1, -SWITCH_FRAME_SIZE(r1)
	SAVE_NVGPRS(r1)

	/*
	 * Save the MSR, since the TM/math bits might be changed
	 * by __kvmppc_save_tm().
	 */
	mfmsr	r5
	SAVE_GPR(5, r1)

	/* Also save DSCR and CR so that they can be recovered later. */
	mfspr   r6, SPRN_DSCR
	SAVE_GPR(6, r1)

	mfcr    r7
	stw     r7, _CCR(r1)

	bl	__kvmppc_save_tm

	ld      r7, _CCR(r1)
	mtcr	r7

	REST_GPR(6, r1)
	mtspr   SPRN_DSCR, r6

	/*
	 * Restore the saved MSR, but keep the current MSR[TS] bits:
	 * mtmsrd must not change the transaction state.
	 */
	REST_GPR(5, r1)
	mfmsr   r6
	rldicl  r6, r6, 64 - MSR_TS_S_LG, 62
	rldimi  r5, r6, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	mtmsrd  r5

	REST_NVGPRS(r1)
	addi    r1, r1, SWITCH_FRAME_SIZE
	ld	r5, PPC_LR_STKOFF(r1)
	mtlr	r5
	blr

EXPORT_SYMBOL_GPL(_kvmppc_save_tm_pr);

/*
 * Restore transactional state and TM-related registers.
 * Called with:
 *  - r3 pointing to the vcpu struct
 *  - r4 containing the guest MSR with desired TS bits:
 *	for HV KVM, this is VCPU_MSR;
 *	for PR KVM, it is provided by the caller
 * This potentially modifies all checkpointed registers.
 * It restores r1 and r2 from the PACA.
 */
_GLOBAL(__kvmppc_restore_tm)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)

	/* Turn on TM/FP/VSX/VMX so we can restore them. */
	mfmsr	r5
	li	r6, MSR_TM >> 32
	sldi	r6, r6, 32
	or	r5, r5, r6
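	/*
	 * MSR_TM sits above bit 31, out of reach of ori/oris, so it is
	 * built with li+sldi and ORed in above.
	 */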
	ori	r5, r5, MSR_FP
	oris	r5, r5, (MSR_VEC | MSR_VSX)@h
	mtmsrd	r5

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r3)
	ld	r6, VCPU_TFIAR(r3)
	ld	r7, VCPU_TEXASR(r3)
	mtspr	SPRN_TFHAR, r5
	mtspr	SPRN_TFIAR, r6
	mtspr	SPRN_TEXASR, r7

	mr	r5, r4
	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
	beqlr		/* TM not active in guest */
	std	r1, HSTATE_SCRATCH2(r13)

	/*
	 * Make sure the failure summary is set; otherwise we'll take a
	 * program check when we trechkpt. It's possible that this was not
	 * set on a kvmppc_set_one_reg() call, but we shouldn't let that
	 * crash the host.
	 */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7

	/*
	 * We need to load up the checkpointed state for the guest.
	 * This must be done early, as it will blow away the GPRs, VSRs and
	 * some SPRs.
	 */

	mr	r31, r3
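	/*
	 * r3 is the argument register for load_fp_state/load_vr_state,
	 * so keep the vcpu pointer in r31 across the calls.
	 */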
	addi	r3, r31, VCPU_FPRS_TM
	bl	load_fp_state
	addi	r3, r31, VCPU_VRS_TM
	bl	load_vr_state
	mr	r3, r31
	lwz	r7, VCPU_VRSAVE_TM(r3)
	mtspr	SPRN_VRSAVE, r7

	ld	r5, VCPU_LR_TM(r3)
	lwz	r6, VCPU_CR_TM(r3)
	ld	r7, VCPU_CTR_TM(r3)
	ld	r8, VCPU_AMR_TM(r3)
	ld	r9, VCPU_TAR_TM(r3)
	ld	r10, VCPU_XER_TM(r3)
	mtlr	r5
	mtcr	r6
	mtctr	r7
	mtspr	SPRN_AMR, r8
	mtspr	SPRN_TAR, r9
	mtxer	r10

	/*
	 * Load up PPR and DSCR values but don't put them in the actual SPRs
	 * till the last moment to avoid running with userspace PPR and DSCR for
	 * too long.
	 */
	ld	r29, VCPU_DSCR_TM(r3)
	ld	r30, VCPU_PPR_TM(r3)

	std	r2, PACATMSCRATCH(r13) /* Save TOC */

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	/* Load GPRs r0-r28 */
	reg = 0
	.rept	29
	ld	reg, VCPU_GPRS_TM(reg)(r31)
	reg = reg + 1
	.endr

	mtspr	SPRN_DSCR, r29
	mtspr	SPRN_PPR, r30

	/* Load final GPRs */
	ld	29, VCPU_GPRS_TM(29)(r31)
	ld	30, VCPU_GPRS_TM(30)(r31)
	ld	31, VCPU_GPRS_TM(31)(r31)

	/* TM checkpointed state is now setup.  All GPRs are now volatile. */
	TRECHKPT
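	/*
	 * The register values just loaded are now the checkpointed state
	 * and the CPU is in suspended transaction state.
	 */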

	/* Now let's get back the state we need. */
	HMT_MEDIUM
	GET_PACA(r13)
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29
#endif
	ld	r1, HSTATE_SCRATCH2(r13)
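	/* Reload the TOC pointer stashed in PACATMSCRATCH before the GPR load. */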
	ld	r2, PACATMSCRATCH(r13)

	/* Set the MSR RI since we have our registers back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1
	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr

/*
 * _kvmppc_restore_tm_pr() is a wrapper around __kvmppc_restore_tm(), so that
 * it can be called from C code. It is used by PR KVM only.
 */
_GLOBAL(_kvmppc_restore_tm_pr)
	mflr	r5
	std	r5, PPC_LR_STKOFF(r1)
	stdu    r1, -SWITCH_FRAME_SIZE(r1)
	SAVE_NVGPRS(r1)

	/*
	 * Save the MSR, since the TM/math bits might be changed
	 * by __kvmppc_restore_tm().
	 */
	mfmsr	r5
	SAVE_GPR(5, r1)

	/* Also save DSCR and CR so that they can be recovered later. */
	mfspr   r6, SPRN_DSCR
	SAVE_GPR(6, r1)

	mfcr    r7
	stw     r7, _CCR(r1)

	bl	__kvmppc_restore_tm

	ld      r7, _CCR(r1)
	mtcr	r7

	REST_GPR(6, r1)
	mtspr   SPRN_DSCR, r6

	/*
	 * Restore the saved MSR, but keep the current MSR[TS] bits:
	 * mtmsrd must not change the transaction state.
	 */
	REST_GPR(5, r1)
	mfmsr   r6
	rldicl  r6, r6, 64 - MSR_TS_S_LG, 62
	rldimi  r5, r6, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	mtmsrd  r5

	REST_NVGPRS(r1)
	addi    r1, r1, SWITCH_FRAME_SIZE
	ld	r5, PPC_LR_STKOFF(r1)
	mtlr	r5
	blr

EXPORT_SYMBOL_GPL(_kvmppc_restore_tm_pr);
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */