/* tsb.S: Sparc64 TSB table handling.
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */

#include <asm/tsb.h>
#include <asm/hypervisor.h>
#include <asm/page.h>
#include <asm/cpudata.h>
#include <asm/mmu.h>

	.text
	.align	32

	/* Invoked from the TLB miss handler, we are in the
	 * MMU global registers and they are set up like
	 * this:
	 *
	 * %g1: TSB entry pointer
	 * %g2:	available temporary
	 * %g3:	FAULT_CODE_{D,I}TLB
	 * %g4:	available temporary
	 * %g5:	available temporary
	 * %g6: TAG TARGET
	 * %g7:	available temporary, will be loaded by us with
	 *      the physical address base of the linux page
	 *      tables for the current address space
	 */
tsb_miss_dtlb:
	mov		TLB_TAG_ACCESS, %g4
	ba,pt		%xcc, tsb_miss_page_table_walk
	 ldxa		[%g4] ASI_DMMU, %g4

tsb_miss_itlb:
	mov		TLB_TAG_ACCESS, %g4
	ba,pt		%xcc, tsb_miss_page_table_walk
	 ldxa		[%g4] ASI_IMMU, %g4
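
	/* Note: in both miss handlers above the TLB_TAG_ACCESS load sits
	 * in the branch delay slot, so it executes before control reaches
	 * tsb_miss_page_table_walk and %g4 holds the faulting vaddr there.
	 */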

	/* At this point we have:
	 * %g1 --	PAGE_SIZE TSB entry address
	 * %g3 --	FAULT_CODE_{D,I}TLB
	 * %g4 --	missing virtual address
	 * %g6 --	TAG TARGET (vaddr >> 22)
	 */
tsb_miss_page_table_walk:
	TRAP_LOAD_TRAP_BLOCK(%g7, %g5)

	/* Before committing to a full page table walk,
	 * check the huge page TSB.
	 */
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
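	/* Each 661: label below marks a boot-time patch point: on sun4v,
	 * the two instructions at the label are overwritten with the two
	 * instructions recorded in the .sun4v_2insn_patch section.
	 */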

661:	ldx		[%g7 + TRAP_PER_CPU_TSB_HUGE], %g5
	nop
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	mov		SCRATCHPAD_UTSBREG2, %g5
	ldxa		[%g5] ASI_SCRATCHPAD, %g5
	.previous

	cmp		%g5, -1
	be,pt		%xcc, 80f
	 nop
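	/* (%g5 == -1 above means no huge page TSB is allocated for this
	 *  address space; we skip the lookup but still store the -1 into
	 *  the per-cpu temporary at 80: so the fastpath below can see
	 *  there was none.)
	 */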

	/* We need an aligned pair of registers containing 2 values
	 * which can be easily rematerialized.  %g6 and %g7 foot the
	 * bill just nicely.  We'll save %g6 away into %g2 for the
	 * huge page TSB TAG comparison.
	 *
	 * Perform a huge page TSB lookup.
	 */
	mov		%g6, %g2			/* save TAG TARGET */
	and		%g5, 0x7, %g6			/* TSB size index (low 3 bits) */
	mov		512, %g7
	andn		%g5, 0x7, %g5			/* TSB base address */
	sllx		%g7, %g6, %g7			/* nentries = 512 << size index */
	srlx		%g4, HPAGE_SHIFT, %g6		/* huge page number */
	sub		%g7, 1, %g7			/* index mask = nentries - 1 */
	and		%g6, %g7, %g6			/* entry index */
	sllx		%g6, 4, %g6			/* each entry is 16 bytes */
	add		%g5, %g6, %g5			/* huge TSB entry address */

	TSB_LOAD_QUAD(%g5, %g6)
	cmp		%g6, %g2
	be,a,pt		%xcc, tsb_tlb_reload
	 mov		%g7, %g5
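	/* (The ,a annul bit makes the mov in the delay slot execute only
	 *  when the branch is taken, so %g5 gets the matching TTE that
	 *  TSB_LOAD_QUAD left in %g7.)
	 */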

	/* No match, remember the huge page TSB entry address,
	 * and restore %g6 and %g7.
	 */
	TRAP_LOAD_TRAP_BLOCK(%g7, %g6)
	srlx		%g4, 22, %g6
80:	stx		%g5, [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP]

#endif

	ldx		[%g7 + TRAP_PER_CPU_PGD_PADDR], %g7

	/* At this point we have:
	 * %g1 --	TSB entry address
	 * %g3 --	FAULT_CODE_{D,I}TLB
	 * %g4 --	missing virtual address
	 * %g6 --	TAG TARGET (vaddr >> 22)
	 * %g7 --	page table physical address
	 *
	 * We know that both the base PAGE_SIZE TSB and the HPAGE_SIZE
	 * TSB lack a matching entry.
	 */
tsb_miss_page_table_walk_sun4v_fastpath:
	USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)
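	/* USER_PGTABLE_WALK_TL1 walks the page tables in physical memory
	 * from the pgd address in %g7, branching to tsb_do_fault if no
	 * valid mapping exists for the vaddr in %g4.
	 */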

	/* Valid PTE is now in %g5.  */

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
661:	sethi		%uhi(_PAGE_SZALL_4U), %g7
	sllx		%g7, 32, %g7
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	mov		_PAGE_SZALL_4V, %g7
	nop
	.previous

	and		%g5, %g7, %g2

661:	sethi		%uhi(_PAGE_SZHUGE_4U), %g7
	sllx		%g7, 32, %g7
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	mov		_PAGE_SZHUGE_4V, %g7
	nop
	.previous

	cmp		%g2, %g7
	bne,pt		%xcc, 60f
	 nop

	/* It is a huge page, use the huge page TSB entry address we
	 * calculated above.
	 */
	TRAP_LOAD_TRAP_BLOCK(%g7, %g2)
	ldx		[%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP], %g2
	cmp		%g2, -1
	movne		%xcc, %g2, %g1
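	/* (movne: only replace the TSB entry pointer in %g1 when the
	 *  saved huge TSB entry address is valid, i.e. not -1.)
	 */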
60:
#endif
	/* At this point we have:
	 * %g1 --	TSB entry address
	 * %g3 --	FAULT_CODE_{D,I}TLB
	 * %g5 --	valid PTE
	 * %g6 --	TAG TARGET (vaddr >> 22)
	 */
tsb_reload:
	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)
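	/* TSB_LOCK_TAG sets the lock bit in the tag word with an atomic
	 * compare-and-swap; TSB_WRITE stores the new TTE and then the
	 * tag, which clears the lock again.
	 */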

	/* Finally, load TLB and return from trap.  */
tsb_tlb_reload:
	cmp		%g3, FAULT_CODE_DTLB
	bne,pn		%xcc, tsb_itlb_load
	 nop

tsb_dtlb_load:

661:	stxa		%g5, [%g0] ASI_DTLB_DATA_IN
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_DTLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are set up
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6:	TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_dtlb_load
	 mov		%g5, %g3

tsb_itlb_load:
	/* Executable bit must be set.  */
661:	sethi		%hi(_PAGE_EXEC_4U), %g4
	andcc		%g5, %g4, %g0
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	andcc		%g5, _PAGE_EXEC_4V, %g0
	nop
	.previous

	be,pn		%xcc, tsb_do_fault
	 nop

661:	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_ITLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are set up
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6:	TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_itlb_load
	 mov		%g5, %g3

	/* No valid entry in the page tables, do full fault
	 * processing.
	 */

	.globl		tsb_do_fault
tsb_do_fault:
	cmp		%g3, FAULT_CODE_DTLB

661:	rdpr		%pstate, %g5
	wrpr		%g5, PSTATE_AG | PSTATE_MG, %pstate
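	/* (wrpr with two operands XORs them into %pstate, flipping us
	 *  from the MMU globals (MG) to the alternate globals (AG) on
	 *  pre-sun4v chips.)
	 */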
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	SET_GL(1)
	ldxa		[%g0] ASI_SCRATCHPAD, %g4
	.previous

	bne,pn		%xcc, tsb_do_itlb_fault
	 nop

tsb_do_dtlb_fault:
	rdpr	%tl, %g3
	cmp	%g3, 1

661:	mov	TLB_TAG_ACCESS, %g4
	ldxa	[%g4] ASI_DMMU, %g5
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	ldx	[%g4 + HV_FAULT_D_ADDR_OFFSET], %g5
	nop
	.previous

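	/* (On sun4v the two instructions at 661 above are patched into a
	 *  load of the fault address from the per-cpu fault status block,
	 *  whose address was read from the scratchpad into %g4 earlier.)
	 */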
	be,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_DTLB, %g4
	ba,pt	%xcc, winfix_trampoline
	 nop

tsb_do_itlb_fault:
	rdpr	%tpc, %g5
	ba,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_ITLB, %g4

	.globl	sparc64_realfault_common
sparc64_realfault_common:
	/* fault code in %g4, fault address in %g5, etrap will
	 * preserve these two values in %l4 and %l5 respectively
	 */
	ba,pt	%xcc, etrap			! Save trap state
1:	 rd	%pc, %g7			! ...
	stb	%l4, [%g6 + TI_FAULT_CODE]	! Save fault code
	stx	%l5, [%g6 + TI_FAULT_ADDR]	! Save fault address
	call	do_sparc64_fault		! Call fault handler
	 add	%sp, PTREGS_OFF, %o0		! Compute pt_regs arg
	ba,pt	%xcc, rtrap			! Restore cpu state
	 nop					! Delay slot (fill me)
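
	/* Each register window spill/fill trap vector is 0x80 bytes and,
	 * by convention, its last word (offset 0x7c) branches to the
	 * matching window fixup handler; pointing TNPC there redirects
	 * the faulting spill/fill there when we 'done'.
	 */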

winfix_trampoline:
	rdpr	%tpc, %g3			! Prepare winfixup TNPC
	or	%g3, 0x7c, %g3			! Compute branch offset
	wrpr	%g3, %tnpc			! Write it into TNPC
	done					! Trap return

	/* Insert an entry into the TSB.
	 *
	 * %o0: TSB entry pointer (virt or phys address)
	 * %o1: tag
	 * %o2:	pte
	 */
	.align	32
	.globl	__tsb_insert
__tsb_insert:
	rdpr	%pstate, %o5
	wrpr	%o5, PSTATE_IE, %pstate	! Disable interrupts (wrpr XORs)
	TSB_LOCK_TAG(%o0, %g2, %g3)
	TSB_WRITE(%o0, %o2, %o1)
	wrpr	%o5, %pstate		! Restore interrupt state
	retl
	 nop
	.size	__tsb_insert, .-__tsb_insert

	/* Flush the given TSB entry if it has the matching
	 * tag.
	 *
	 * %o0: TSB entry pointer (virt or phys address)
	 * %o1:	tag
	 */
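	/* Spin while the lock bit is set, then compare-and-swap the
	 * invalid bit into the tag only if the tag still matches,
	 * retrying from the top if the CAS loses a race.
	 */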
	.align	32
	.globl	tsb_flush
	.type	tsb_flush,#function
tsb_flush:
	sethi	%hi(TSB_TAG_LOCK_HIGH), %g2
1:	TSB_LOAD_TAG(%o0, %g1)
	srlx	%g1, 32, %o3
	andcc	%o3, %g2, %g0
	bne,pn	%icc, 1b
	 nop
	cmp	%g1, %o1
	mov	1, %o3
	bne,pt	%xcc, 2f
	 sllx	%o3, TSB_TAG_INVALID_BIT, %o3
	TSB_CAS_TAG(%o0, %g1, %o3)
	cmp	%g1, %o3
	bne,pn	%xcc, 1b
	 nop
2:	retl
	 nop
	.size	tsb_flush, .-tsb_flush

	/* Reload MMU related context switch state at
	 * schedule() time.
	 *
	 * %o0: page table physical address
	 * %o1:	TSB base config pointer
	 * %o2:	TSB huge config pointer, or NULL if none
	 * %o3:	Hypervisor TSB descriptor physical address
	 *
	 * We have to run this whole thing with interrupts
	 * disabled so that the current cpu doesn't change
	 * due to preemption.
	 */
	.align	32
	.globl	__tsb_context_switch
	.type	__tsb_context_switch,#function
__tsb_context_switch:
	rdpr	%pstate, %g1
	wrpr	%g1, PSTATE_IE, %pstate

	TRAP_LOAD_TRAP_BLOCK(%g2, %g3)

	stx	%o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]

	ldx	[%o1 + TSB_CONFIG_REG_VAL], %o0
	brz,pt	%o2, 1f
	 mov	-1, %g3

	ldx	[%o2 + TSB_CONFIG_REG_VAL], %g3

1:	stx	%g3, [%g2 + TRAP_PER_CPU_TSB_HUGE]

	sethi	%hi(tlb_type), %g2
	lduw	[%g2 + %lo(tlb_type)], %g2
	cmp	%g2, 3
	bne,pt	%icc, 50f
	 nop

	/* Hypervisor TSB switch. */
	mov	SCRATCHPAD_UTSBREG1, %o5
	stxa	%o0, [%o5] ASI_SCRATCHPAD
	mov	SCRATCHPAD_UTSBREG2, %o5
	stxa	%g3, [%o5] ASI_SCRATCHPAD

	mov	2, %o0
	cmp	%g3, -1
	move	%xcc, 1, %o0
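	/* (%o0 becomes the TSB descriptor count for the hypervisor call:
	 *  2 if a huge page TSB is configured, else 1.)
	 */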

	mov	HV_FAST_MMU_TSB_CTXNON0, %o5
	mov	%o3, %o1
	ta	HV_FAST_TRAP

	/* Finish up.  */
	ba,pt	%xcc, 9f
	 nop

	/* SUN4U TSB switch.  */
50:	mov	TSB_REG, %o5
	stxa	%o0, [%o5] ASI_DMMU
	membar	#Sync
	stxa	%o0, [%o5] ASI_IMMU
	membar	#Sync

2:	ldx	[%o1 + TSB_CONFIG_MAP_VADDR], %o4
	brz	%o4, 9f
	 ldx	[%o1 + TSB_CONFIG_MAP_PTE], %o5
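	/* (A NULL map vaddr means the TSB needs no special TLB mapping.
	 *  Otherwise lock the TSB mapping into the highest unlocked TLB
	 *  entry, and the huge TSB mapping, if any, into the slot below.)
	 */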

	sethi	%hi(sparc64_highest_unlocked_tlb_ent), %g2
	mov	TLB_TAG_ACCESS, %g3
	lduw	[%g2 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2
	stxa	%o4, [%g3] ASI_DMMU
	membar	#Sync
	sllx	%g2, 3, %g2
	stxa	%o5, [%g2] ASI_DTLB_DATA_ACCESS
	membar	#Sync

	brz,pt	%o2, 9f
	 nop

	ldx	[%o2 + TSB_CONFIG_MAP_VADDR], %o4
	ldx	[%o2 + TSB_CONFIG_MAP_PTE], %o5
	mov	TLB_TAG_ACCESS, %g3
	stxa	%o4, [%g3] ASI_DMMU
	membar	#Sync
	sub	%g2, (1 << 3), %g2
	stxa	%o5, [%g2] ASI_DTLB_DATA_ACCESS
	membar	#Sync

9:
	wrpr	%g1, %pstate

	retl
	 nop
	.size	__tsb_context_switch, .-__tsb_context_switch

#define TSB_PASS_BITS	((1 << TSB_TAG_LOCK_BIT) | \
			 (1 << TSB_TAG_INVALID_BIT))

	.align	32
	.globl	copy_tsb
	.type	copy_tsb,#function
copy_tsb:		/* %o0=old_tsb_base, %o1=old_tsb_size
			 * %o2=new_tsb_base, %o3=new_tsb_size
			 */
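	/* Re-hash every valid, unlocked entry from the old TSB into the
	 * new one: the entry's vaddr is rebuilt from its tag plus its old
	 * index, then masked with the new size to pick the new slot.
	 */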
	sethi		%uhi(TSB_PASS_BITS), %g7
	srlx		%o3, 4, %o3
	add		%o0, %o1, %g1	/* end of old tsb */
	sllx		%g7, 32, %g7
	sub		%o3, 1, %o3	/* %o3 == new tsb hash mask */

661:	prefetcha	[%o0] ASI_N, #one_read
	.section	.tsb_phys_patch, "ax"
	.word		661b
	prefetcha	[%o0] ASI_PHYS_USE_EC, #one_read
	.previous

90:	andcc		%o0, (64 - 1), %g0
	bne		1f
	 add		%o0, 64, %o5

661:	prefetcha	[%o5] ASI_N, #one_read
	.section	.tsb_phys_patch, "ax"
	.word		661b
	prefetcha	[%o5] ASI_PHYS_USE_EC, #one_read
	.previous

1:	TSB_LOAD_QUAD(%o0, %g2)		/* %g2/%g3 == TSB entry */
	andcc		%g2, %g7, %g0	/* LOCK or INVALID set? */
	bne,pn		%xcc, 80f	/* Skip it */
	 sllx		%g2, 22, %o4	/* TAG --> VADDR */

	/* This can definitely be computed faster... */
	srlx		%o0, 4, %o5	/* Build index */
	and		%o5, 511, %o5	/* Mask index */
	sllx		%o5, PAGE_SHIFT, %o5 /* Put into vaddr position */
	or		%o4, %o5, %o4	/* Full VADDR. */
	srlx		%o4, PAGE_SHIFT, %o4 /* Shift down to create index */
	and		%o4, %o3, %o4	/* Mask with new_tsb_nents-1 */
	sllx		%o4, 4, %o4	/* Shift back up into tsb ent offset */
	TSB_STORE(%o2 + %o4, %g2)	/* Store TAG */
	add		%o4, 0x8, %o4	/* Advance to TTE */
	TSB_STORE(%o2 + %o4, %g3)	/* Store TTE */

80:	add		%o0, 16, %o0
	cmp		%o0, %g1
	bne,pt		%xcc, 90b
	 nop

	retl
	 nop
	.size		copy_tsb, .-copy_tsb

	/* Set the invalid bit in all TSB entries.  */
	.align		32
	.globl		tsb_init
	.type		tsb_init,#function
tsb_init:		/* %o0 = TSB vaddr, %o1 = size in bytes */
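	/* Each iteration stamps the invalid bit into the tag word of
	 * sixteen 16-byte entries (0x100 bytes), so the size must be a
	 * multiple of 256; the prefetches run ahead of the stores.
	 */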
	prefetch	[%o0 + 0x000], #n_writes
	mov		1, %g1
	prefetch	[%o0 + 0x040], #n_writes
	sllx		%g1, TSB_TAG_INVALID_BIT, %g1
	prefetch	[%o0 + 0x080], #n_writes
1:	prefetch	[%o0 + 0x0c0], #n_writes
	stx		%g1, [%o0 + 0x00]
	stx		%g1, [%o0 + 0x10]
	stx		%g1, [%o0 + 0x20]
	stx		%g1, [%o0 + 0x30]
	prefetch	[%o0 + 0x100], #n_writes
	stx		%g1, [%o0 + 0x40]
	stx		%g1, [%o0 + 0x50]
	stx		%g1, [%o0 + 0x60]
	stx		%g1, [%o0 + 0x70]
	prefetch	[%o0 + 0x140], #n_writes
	stx		%g1, [%o0 + 0x80]
	stx		%g1, [%o0 + 0x90]
	stx		%g1, [%o0 + 0xa0]
	stx		%g1, [%o0 + 0xb0]
	prefetch	[%o0 + 0x180], #n_writes
	stx		%g1, [%o0 + 0xc0]
	stx		%g1, [%o0 + 0xd0]
	stx		%g1, [%o0 + 0xe0]
	stx		%g1, [%o0 + 0xf0]
	subcc		%o1, 0x100, %o1
	bne,pt		%xcc, 1b
	 add		%o0, 0x100, %o0
	retl
	 nop
	nop
	nop
	.size		tsb_init, .-tsb_init

	.globl		NGtsb_init
	.type		NGtsb_init,#function
NGtsb_init:
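	/* Niagara variant: the block-init store ASI avoids read-to-own
	 * bus traffic for lines we fully overwrite; the trailing
	 * membar #Sync orders the stores before return.
	 */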
	rd		%asi, %g2
	mov		1, %g1
	wr		%g0, ASI_BLK_INIT_QUAD_LDD_P, %asi
	sllx		%g1, TSB_TAG_INVALID_BIT, %g1
1:	stxa		%g1, [%o0 + 0x00] %asi
	stxa		%g1, [%o0 + 0x10] %asi
	stxa		%g1, [%o0 + 0x20] %asi
	stxa		%g1, [%o0 + 0x30] %asi
	stxa		%g1, [%o0 + 0x40] %asi
	stxa		%g1, [%o0 + 0x50] %asi
	stxa		%g1, [%o0 + 0x60] %asi
	stxa		%g1, [%o0 + 0x70] %asi
	stxa		%g1, [%o0 + 0x80] %asi
	stxa		%g1, [%o0 + 0x90] %asi
	stxa		%g1, [%o0 + 0xa0] %asi
	stxa		%g1, [%o0 + 0xb0] %asi
	stxa		%g1, [%o0 + 0xc0] %asi
	stxa		%g1, [%o0 + 0xd0] %asi
	stxa		%g1, [%o0 + 0xe0] %asi
	stxa		%g1, [%o0 + 0xf0] %asi
	subcc		%o1, 0x100, %o1
	bne,pt		%xcc, 1b
	 add		%o0, 0x100, %o0
	membar		#Sync
	retl
	 wr		%g2, 0x0, %asi
	.size		NGtsb_init, .-NGtsb_init