/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Transactional memory support routines to reclaim and recheckpoint
 * transactional process state.
 *
 * Copyright 2012 Matt Evans & Michael Neuling, IBM Corporation.
 */

#include <asm/asm-offsets.h>
#include <asm/ppc_asm.h>
#include <asm/ppc-opcode.h>
#include <asm/ptrace.h>
#include <asm/reg.h>
#include <asm/bug.h>
#include <asm/export.h>

#ifdef CONFIG_VSX
/* See fpu.S, this is borrowed from there */
#define __SAVE_32FPRS_VSRS(n,c,base)		\
BEGIN_FTR_SECTION				\
	b	2f;				\
END_FTR_SECTION_IFSET(CPU_FTR_VSX);		\
	SAVE_32FPRS(n,base);			\
	b	3f;				\
2:	SAVE_32VSRS(n,c,base);			\
3:
#define __REST_32FPRS_VSRS(n,c,base)		\
BEGIN_FTR_SECTION				\
	b	2f;				\
END_FTR_SECTION_IFSET(CPU_FTR_VSX);		\
	REST_32FPRS(n,base);			\
	b	3f;				\
2:	REST_32VSRS(n,c,base);			\
3:
#else
#define __SAVE_32FPRS_VSRS(n,c,base)	SAVE_32FPRS(n, base)
#define __REST_32FPRS_VSRS(n,c,base)	REST_32FPRS(n, base)
#endif
#define SAVE_32FPRS_VSRS(n,c,base) \
	__SAVE_32FPRS_VSRS(n,__REG_##c,__REG_##base)
#define REST_32FPRS_VSRS(n,c,base) \
	__REST_32FPRS_VSRS(n,__REG_##c,__REG_##base)
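
/*
 * Usage sketch (see the reclaim path below): SAVE_32FPRS_VSRS(0, R6, R7)
 * stores FPRs/VSRs 0-31 to the buffer at r7 with r6 as scratch, taking the
 * VSX form only when CPU_FTR_VSX is set; REST_32FPRS_VSRS is the inverse.
 */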

/* Stack frame offsets for local variables. */
#define TM_FRAME_L0	TM_FRAME_SIZE-16
#define TM_FRAME_L1	TM_FRAME_SIZE-8


/* In order to access the TM SPRs, TM must be enabled.  So, do so: */
_GLOBAL(tm_enable)
	mfmsr	r4
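	/* MSR_TM is in the upper word of the MSR, beyond the reach of a
	 * 16-bit immediate, so build the mask with li + sldi. */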
	li	r3, MSR_TM >> 32
	sldi	r3, r3, 32
	and.	r0, r4, r3
	bne	1f
	or	r4, r4, r3
	mtmsrd	r4
1:	blr
EXPORT_SYMBOL_GPL(tm_enable);

_GLOBAL(tm_disable)
	mfmsr	r4
	li	r3, MSR_TM >> 32
	sldi	r3, r3, 32
	andc	r4, r4, r3
	mtmsrd	r4
	blr
EXPORT_SYMBOL_GPL(tm_disable);
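
/*
 * A hedged sketch of the C-side view of the two helpers above (the
 * authoritative prototypes live in the powerpc TM headers):
 *
 *	extern void tm_enable(void);
 *	extern void tm_disable(void);
 */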

_GLOBAL(tm_save_sprs)
	mfspr	r0, SPRN_TFHAR
	std	r0, THREAD_TM_TFHAR(r3)
	mfspr	r0, SPRN_TEXASR
	std	r0, THREAD_TM_TEXASR(r3)
	mfspr	r0, SPRN_TFIAR
	std	r0, THREAD_TM_TFIAR(r3)
	blr

_GLOBAL(tm_restore_sprs)
	ld	r0, THREAD_TM_TFHAR(r3)
	mtspr	SPRN_TFHAR, r0
	ld	r0, THREAD_TM_TEXASR(r3)
	mtspr	SPRN_TEXASR, r0
	ld	r0, THREAD_TM_TFIAR(r3)
	mtspr	SPRN_TFIAR, r0
	blr
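
/*
 * Both routines above take a struct thread_struct * in r3.  A hedged,
 * illustrative C-level sketch of a caller (the real call sites are in
 * arch/powerpc/kernel/process.c; "prev"/"next" are hypothetical names):
 *
 *	tm_save_sprs(&prev->thread);	(stash TFHAR/TEXASR/TFIAR)
 *	tm_restore_sprs(&next->thread);	(reload them on switch-in)
 */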

	/* Passed an 8-bit failure cause as first argument. */
_GLOBAL(tm_abort)
	TABORT(R3)
	blr
EXPORT_SYMBOL_GPL(tm_abort);
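
/*
 * A hedged usage sketch: the cause is one of the TM_CAUSE_* codes from the
 * uapi TM header, e.g.
 *
 *	tm_abort(TM_CAUSE_TLBI);	(abort due to a TLB invalidation)
 */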

/* void tm_reclaim(struct thread_struct *thread,
 *		   uint8_t cause)
 *
 *	- Performs a full reclaim.  This destroys outstanding
 *	  transactions and updates thread->ckpt_regs with the
 *	  original checkpointed state.  Note that thread->regs is
 *	  unchanged.
 *
 * The purpose is to both abort the transaction of, and preserve the state
 * of, a process at a context switch.  We preserve/restore both sets of
 * process state so that it can be restored when the thread is scheduled
 * again.  We continue in userland as though nothing happened, but when the
 * transaction is resumed it will abort back to the checkpointed state we
 * save out here.
 *
 * Call with IRQs off, stacks get all out of sync for some periods in here!
 */
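/*
 * A hedged sketch of a C-side caller (the real call sites live in
 * arch/powerpc/kernel/process.c and vary by kernel version):
 *
 *	local_irq_save(flags);
 *	tm_reclaim(&tsk->thread, TM_CAUSE_RESCHED);
 *	local_irq_restore(flags);
 */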
_GLOBAL(tm_reclaim)
	mfcr	r5
	mflr	r0
	stw	r5, 8(r1)
	std	r0, 16(r1)
	std	r2, STK_GOT(r1)
	stdu	r1, -TM_FRAME_SIZE(r1)

	/* We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD]. */

	std	r3, STK_PARAM(R3)(r1)
	SAVE_NVGPRS(r1)

	/* We need to set up the MSR for VSX register save instructions. */
	mfmsr	r14
	mr	r15, r14
	ori	r15, r15, MSR_FP
	li	r16, 0
	ori	r16, r16, MSR_EE /* IRQs hard off */
	andc	r15, r15, r16
	oris	r15, r15, MSR_VEC@h
#ifdef CONFIG_VSX
	BEGIN_FTR_SECTION
	oris	r15,r15, MSR_VSX@h
	END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r15
	std	r14, TM_FRAME_L0(r1)

	/* Do a sanity check on the MSR to make sure we are suspended */
	li	r7, (MSR_TS_S)@higher
	srdi	r6, r14, 32
	and	r6, r6, r7
1:	tdeqi   r6, 0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0
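	/* MSR[TS] lives in the upper word, so the check shifts the MSR down
	 * by 32 and masks with (MSR_TS_S)@higher; the conditional trap and
	 * bug entry fire only if the suspended bit is clear.
	 */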

	/* Stash the stack pointer away for use after reclaim */
	std	r1, PACAR1(r13)

	/* Clear MSR RI since we are about to change r1, EE is already off. */
	li	r5, 0
	mtmsrd	r5, 1

	/*
	 * BE CAREFUL HERE:
	 * At this point we can't take an SLB miss since we have MSR_RI
	 * off. Load only to/from the stack/paca which are in SLB bolted regions
	 * until we turn MSR RI back on.
	 *
	 * The moment we treclaim, ALL of our GPRs will switch
	 * to user register state.  (FPRs, CCR etc. also!)
	 * Use an sprg and a tm_scratch in the PACA to shuffle.
	 */
	TRECLAIM(R4)				/* Cause in r4 */

	/* ******************** GPRs ******************** */
	/* Stash the checkpointed r13 away in the scratch SPR and get the real
	 *  paca
	 */
	SET_SCRATCH0(r13)
	GET_PACA(r13)

	/* Stash the checkpointed r1 away in paca tm_scratch and get the real
	 * stack pointer back
	 */
	std	r1, PACATMSCRATCH(r13)
	ld	r1, PACAR1(r13)

	/* Store the PPR in r11 and reset it to a decent value */
	std	r11, GPR11(r1)			/* Temporary stash */

	/* Reset MSR RI so we can take SLB faults again */
	li	r11, MSR_RI
	mtmsrd	r11, 1

	mfspr	r11, SPRN_PPR
	HMT_MEDIUM

	/* Now get some more GPRs free */
	std	r7, GPR7(r1)			/* Temporary stash */
	std	r12, GPR12(r1)			/* ''   ''    ''   */
	ld	r12, STK_PARAM(R3)(r1)		/* Param 0, thread_struct * */

	std	r11, THREAD_TM_PPR(r12)		/* Store PPR and free r11 */

	addi	r7, r12, PT_CKPT_REGS		/* Thread's ckpt_regs */

	/* Make r7 look like an exception frame so that we
	 * can use the neat GPRx(n) macros.  r7 is NOT a pt_regs ptr!
	 */
	subi	r7, r7, STACK_FRAME_OVERHEAD
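	/* The GPRx(n) macros already include STACK_FRAME_OVERHEAD, so with
	 * the subtraction above they resolve to offsets within ckpt_regs.
	 */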

	/* Sync the userland GPRs to thread->ckpt_regs: */
	SAVE_GPR(0, r7)				/* user r0 */
	SAVE_GPR(2, r7)				/* user r2 */
	SAVE_4GPRS(3, r7)			/* user r3-r6 */
	SAVE_GPR(8, r7)				/* user r8 */
	SAVE_GPR(9, r7)				/* user r9 */
	SAVE_GPR(10, r7)			/* user r10 */
	ld	r3, PACATMSCRATCH(r13)		/* user r1 */
	ld	r4, GPR7(r1)			/* user r7 */
	ld	r5, GPR11(r1)			/* user r11 */
	ld	r6, GPR12(r1)			/* user r12 */
	GET_SCRATCH0(r8)			/* user r13 */
	std	r3, GPR1(r7)
	std	r4, GPR7(r7)
	std	r5, GPR11(r7)
	std	r6, GPR12(r7)
	std	r8, GPR13(r7)

	SAVE_NVGPRS(r7)				/* user r14-r31 */

	/* ******************** NIP ******************** */
	mfspr	r3, SPRN_TFHAR
	std	r3, _NIP(r7)			/* Returns to failhandler */
	/* The checkpointed NIP is ignored when rescheduling/recheckpointing,
	 * but is used in signal return to 'wind back' to the abort handler.
	 */

	/* ******************** CTR, LR, CCR, XER ********** */
	mfctr	r3
	mflr	r4
	mfcr	r5
	mfxer	r6

	std	r3, _CTR(r7)
	std	r4, _LINK(r7)
	std	r5, _CCR(r7)
	std	r6, _XER(r7)


	/* ******************** TAR, DSCR ********** */
	mfspr	r3, SPRN_TAR
	mfspr	r4, SPRN_DSCR

	std	r3, THREAD_TM_TAR(r12)
	std	r4, THREAD_TM_DSCR(r12)

	/* MSR and flags:  We don't change CRs, and we don't need to alter
	 * MSR.
	 */


	/* ******************** FPR/VR/VSRs ************
	 * After reclaiming, capture the checkpointed FPRs/VRs.
	 *
	 * We enabled VEC/FP/VSX in the MSR above, so we can execute these
	 * instructions!
	 */
	mr	r3, r12

	/* Altivec (VEC/VMX/VR) */
	addi	r7, r3, THREAD_CKVRSTATE
	SAVE_32VRS(0, r6, r7)	/* r6 scratch, r7 transact vr state */
	mfvscr	v0
	li	r6, VRSTATE_VSCR
	stvx	v0, r7, r6

	/* VRSAVE */
	mfspr	r0, SPRN_VRSAVE
	std	r0, THREAD_CKVRSAVE(r3)

	/* Floating Point (FP) */
	addi	r7, r3, THREAD_CKFPSTATE
	SAVE_32FPRS_VSRS(0, R6, R7)	/* r6 scratch, r7 transact fp state */
	mffs    fr0
	stfd    fr0,FPSTATE_FPSCR(r7)


	/* TM regs, incl TEXASR -- these live in thread_struct.  Note they've
	 * been updated by the treclaim, to explain to userland the failure
	 * cause (aborted).
	 */
	mfspr	r0, SPRN_TEXASR
	mfspr	r3, SPRN_TFHAR
	mfspr	r4, SPRN_TFIAR
	std	r0, THREAD_TM_TEXASR(r12)
	std	r3, THREAD_TM_TFHAR(r12)
	std	r4, THREAD_TM_TFIAR(r12)

	/* AMR is checkpointed too, but is unsupported by Linux. */

	/* Restore original MSR/IRQ state & clear TM mode */
	ld	r14, TM_FRAME_L0(r1)		/* Orig MSR */

	li	r15, 0
	rldimi  r14, r15, MSR_TS_LG, (63-MSR_TS_LG)-1
	mtmsrd  r14

	REST_NVGPRS(r1)

	addi    r1, r1, TM_FRAME_SIZE
	lwz	r4, 8(r1)
	ld	r0, 16(r1)
	mtcr	r4
	mtlr	r0
	ld	r2, STK_GOT(r1)

	/* Load CPU's default DSCR */
	ld	r0, PACA_DSCR_DEFAULT(r13)
	mtspr	SPRN_DSCR, r0

	blr


	/* void __tm_recheckpoint(struct thread_struct *thread,
	 *			  unsigned long orig_msr)
	 *	- Restore the checkpointed register state saved by tm_reclaim
	 *	  when we switch_to a process.
	 *
	 *	Call with IRQs off, stacks get all out of sync for
	 *	some periods in here!
	 */
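	/* A hedged sketch of the C-side wrapper (the real tm_recheckpoint()
	 * in arch/powerpc/kernel/process.c disables IRQs around this call):
	 *
	 *	local_irq_save(flags);
	 *	__tm_recheckpoint(&tsk->thread, orig_msr);
	 *	local_irq_restore(flags);
	 */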
_GLOBAL(__tm_recheckpoint)
	mfcr	r5
	mflr	r0
	stw	r5, 8(r1)
	std	r0, 16(r1)
	std	r2, STK_GOT(r1)
	stdu	r1, -TM_FRAME_SIZE(r1)

	/* We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD].
	 * This is used for backing up the NVGPRs:
	 */
	SAVE_NVGPRS(r1)

	/* Load complete register state from ts_ckpt* registers */

	addi	r7, r3, PT_CKPT_REGS		/* Thread's ckpt_regs */

	/* Make r7 look like an exception frame so that we
	 * can use the neat GPRx(n) macros.  r7 is now NOT a pt_regs ptr!
	 */
	subi	r7, r7, STACK_FRAME_OVERHEAD

	/* We need to set up the MSR for FP/VMX/VSX register restore instructions. */
	mfmsr	r6
	mr	r5, r6
	ori	r5, r5, MSR_FP
#ifdef CONFIG_ALTIVEC
	oris	r5, r5, MSR_VEC@h
#endif
#ifdef CONFIG_VSX
	BEGIN_FTR_SECTION
	oris	r5,r5, MSR_VSX@h
	END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r5

#ifdef CONFIG_ALTIVEC
	/*
	 * FP and VEC registers: These are recheckpointed from
	 * thread.ckfp_state and thread.ckvr_state respectively. The
	 * thread.fp_state[] version holds the 'live' (transactional) state,
	 * which will be loaded subsequently by any FP-unavailable trap.
	 */
	addi	r8, r3, THREAD_CKVRSTATE
	li	r5, VRSTATE_VSCR
	lvx	v0, r8, r5
	mtvscr	v0
	REST_32VRS(0, r5, r8)			/* r5 scratch, r8 ptr */
	ld	r5, THREAD_CKVRSAVE(r3)
	mtspr	SPRN_VRSAVE, r5
#endif

	addi	r8, r3, THREAD_CKFPSTATE
	lfd	fr0, FPSTATE_FPSCR(r8)
	MTFSF_L(fr0)
	REST_32FPRS_VSRS(0, R4, R8)

	mtmsr	r6				/* FP/Vec off again! */

restore_gprs:

	/* ******************** CTR, LR, XER ********** */
	ld	r4, _CTR(r7)
	ld	r5, _LINK(r7)
	ld	r8, _XER(r7)

	mtctr	r4
	mtlr	r5
	mtxer	r8

	/* ******************** TAR ******************** */
	ld	r4, THREAD_TM_TAR(r3)
	mtspr	SPRN_TAR,	r4

	/* Load up the PPR and DSCR in GPRs only at this stage */
	ld	r5, THREAD_TM_DSCR(r3)
	ld	r6, THREAD_TM_PPR(r3)

	REST_GPR(0, r7)				/* GPR0 */
	REST_2GPRS(2, r7)			/* GPR2-3 */
	REST_GPR(4, r7)				/* GPR4 */
	REST_4GPRS(8, r7)			/* GPR8-11 */
	REST_2GPRS(12, r7)			/* GPR12-13 */

	REST_NVGPRS(r7)				/* GPR14-31 */

	/* Load up PPR and DSCR here so we don't run with user values for long
	 */
	mtspr	SPRN_DSCR, r5
	mtspr	SPRN_PPR, r6

	/* Do a final sanity check on TEXASR to make sure FS is set.  Do this
	 * here, before we load up the userspace r1, so that any bugs we hit
	 * will produce a usable call chain.
	 */
	mfspr	r5, SPRN_TEXASR
	srdi	r5, r5, 16
	li	r6, (TEXASR_FS)@h
	and	r6, r6, r5
1:	tdeqi	r6, 0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0
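	/* trechkpt is only valid when TEXASR[FS] (failure summary) is set;
	 * catching a clear FS here avoids a TM Bad Thing on the trechkpt
	 * below.
	 */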

	/* Do a final sanity check on the MSR to make sure we are not
	 * transactional or suspended.
	 */
	mfmsr   r6
	li	r5, (MSR_TS_MASK)@higher
	srdi	r6, r6, 32
	and	r6, r6, r5
1:	tdnei   r6, 0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0

	/* Restore CR */
	ld	r6, _CCR(r7)
	mtcr    r6

	REST_GPR(6, r7)

	/*
	 * Store r1 and r5 on the stack so that we can access them
	 * after we clear MSR RI.
	 */

	REST_GPR(5, r7)
	std	r5, -8(r1)
	ld	r5, GPR1(r7)
	std	r5, -16(r1)

	REST_GPR(7, r7)

	/* Clear MSR RI since we are about to change r1. EE is already off */
	li	r5, 0
	mtmsrd	r5, 1

	/*
	 * BE CAREFUL HERE:
	 * At this point we can't take an SLB miss since we have MSR_RI
	 * off. Load only to/from the stack/paca which are in SLB bolted regions
	 * until we turn MSR RI back on.
	 */

	SET_SCRATCH0(r1)
	ld	r5, -8(r1)
	ld	r1, -16(r1)

	/* Commit register state as checkpointed state: */
	TRECHKPT

	HMT_MEDIUM

	/* Our transactional state has now changed.
	 *
	 * Now just get out of here.  Transactional (current) state will be
	 * updated once restore is called on the return path in the process
	 * we _switch-ed to.
	 */

	GET_PACA(r13)
	GET_SCRATCH0(r1)

	/* R1 is restored, so we are recoverable again.  EE is still off */
	li	r4, MSR_RI
	mtmsrd	r4, 1

	REST_NVGPRS(r1)

	addi    r1, r1, TM_FRAME_SIZE
	lwz	r4, 8(r1)
	ld	r0, 16(r1)
	mtcr	r4
	mtlr	r0
	ld	r2, STK_GOT(r1)

	/* Load CPU's default DSCR */
	ld	r0, PACA_DSCR_DEFAULT(r13)
	mtspr	SPRN_DSCR, r0

	blr

	/* ****************************************************************** */