/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
 *
 * Copyright (C) 1995, 1997, 2005, 2008 David S. Miller <davem@davemloft.net>
 * Copyright (C) 1996 Eddie C. Dost        (ecd@brainaid.de)
 * Copyright (C) 1996 Miguel de Icaza      (miguel@nuclecu.unam.mx)
 * Copyright (C) 1996,98,99 Jakub Jelinek  (jj@sunsite.mff.cuni.cz)
 */

#include <asm/head.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>

	.text
	.align		32

kvmap_itlb:
	/* g6: TAG TARGET */
	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_IMMU, %g4

	/* sun4v_itlb_miss branches here with the missing virtual
	 * address already loaded into %g4
	 */
kvmap_itlb_4v:

kvmap_itlb_nonlinear:
	/* Catch kernel NULL pointer calls.  */
	sethi		%hi(PAGE_SIZE), %g5
	cmp		%g4, %g5
	bleu,pn		%xcc, kvmap_dtlb_longpath
	 nop

	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load)
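	/* On a kernel TSB hit the macro above branches straight to
	 * kvmap_itlb_load with the TTE in %g5; on a miss we fall
	 * through with the TSB entry address left in %g1.
	 */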

kvmap_itlb_tsb_miss:
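	/* Not in the kernel TSB.  Addresses below LOW_OBP_ADDRESS (and
	 * those at or above 4GB) are translated via the kernel page
	 * tables; [LOW_OBP_ADDRESS, 4GB) is covered by the OBP
	 * translations.
	 */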
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_obp
	 nop

kvmap_itlb_vmalloc_addr:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)
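	/* The page table walk leaves the physical address of the PTE
	 * in %g5 and bails out to the longpath if the walk terminates
	 * early; the PTE itself is validated below.
	 */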

	TSB_LOCK_TAG(%g1, %g2, %g7)

	/* Load and check PTE.  */
	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
	mov		1, %g7
	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
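	/* A valid PTE has bit 63 set, so a non-negative value means
	 * there is no mapping.  The annulled delay slot re-invalidates
	 * the locked TSB entry only when the branch is taken.
	 */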
	brgez,a,pn	%g5, kvmap_itlb_longpath
	 TSB_STORE(%g1, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	/* fallthrough to TLB load */

kvmap_itlb_load:

661:	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_ITLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are setup
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6:	TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_itlb_load
	 mov		%g5, %g3

kvmap_itlb_longpath:

661:	rdpr	%pstate, %g5
	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	SET_GL(1)
	nop
	.previous

	rdpr	%tpc, %g5
	ba,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_ITLB, %g4

kvmap_itlb_obp:
	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath)
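	/* A matching OBP translation leaves its TTE in %g5; if there
	 * is none we take the longpath and fault.
	 */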

	TSB_LOCK_TAG(%g1, %g2, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	ba,pt		%xcc, kvmap_itlb_load
	 nop

kvmap_dtlb_obp:
	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath)
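	/* As above, but the TTE in %g5 ends up in the D-TLB.  */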

	TSB_LOCK_TAG(%g1, %g2, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	ba,pt		%xcc, kvmap_dtlb_load
	 nop

	.align		32
kvmap_dtlb_tsb4m_load:
	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)
	ba,pt		%xcc, kvmap_dtlb_load
	 nop

kvmap_dtlb:
	/* %g6: TAG TARGET */
	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_DMMU, %g4

	/* sun4v_dtlb_miss branches here with the missing virtual
	 * address already loaded into %g4
	 */
kvmap_dtlb_4v:
	brgez,pn	%g4, kvmap_dtlb_nonlinear
	 nop

#ifdef CONFIG_DEBUG_PAGEALLOC
	/* Index through the base page size TSB even for linear
	 * mappings when using page allocation debugging.
	 */
	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#else
	/* Correct TAG_TARGET is already in %g6, check 4mb TSB.  */
	KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#endif
	/* TSB entry address left in %g1, lookup linear PTE.
	 * Must preserve %g1 and %g6 (TAG).
	 */
kvmap_dtlb_tsb4m_miss:
	/* Clear the PAGE_OFFSET top virtual bits, shift
	 * down to get PFN, and make sure PFN is in range.
	 */
	sllx		%g4, 21, %g5

	/* Check to see if we know about valid memory at the 4MB
	 * chunk this physical address will reside within.
	 */
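	/* %g5 holds the physical address shifted left by 21 bits, so
	 * the srlx/brnz below first rejects anything beyond the
	 * supported physical address range (41 bits, hence 21 + 41).
	 */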
	srlx		%g5, 21 + 41, %g2
	brnz,pn		%g2, kvmap_dtlb_longpath
	 nop

	/* This unconditional branch and delay-slot nop gets patched
	 * by the sethi sequence once the bitmap is properly setup.
	 */
	.globl		valid_addr_bitmap_insn
valid_addr_bitmap_insn:
	ba,pt		%xcc, 2f
	 nop
	.subsection	2
	.globl		valid_addr_bitmap_patch
valid_addr_bitmap_patch:
	sethi		%hi(sparc64_valid_addr_bitmap), %g7
	or		%g7, %lo(sparc64_valid_addr_bitmap), %g7
	.previous
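	/* One bit per 4MB chunk of physical memory in
	 * sparc64_valid_addr_bitmap; a clear bit means this address is
	 * a hole in the memory map, so take the longpath and fault.
	 */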

	srlx		%g5, 21 + 22, %g2
	srlx		%g2, 6, %g5
	and		%g2, 63, %g2
	sllx		%g5, 3, %g5
	ldx		[%g7 + %g5], %g5
	mov		1, %g7
	sllx		%g7, %g2, %g7
	andcc		%g5, %g7, %g0
	be,pn		%xcc, kvmap_dtlb_longpath

2:	 sethi		%hi(kpte_linear_bitmap), %g2
	or		%g2, %lo(kpte_linear_bitmap), %g2

	/* Get the 256MB physical address index. */
	sllx		%g4, 21, %g5
	mov		1, %g7
	srlx		%g5, 21 + 28, %g5

	/* Don't try this at home kids... this depends upon srlx
	 * only taking the low 6 bits of the shift count in %g5.
	 */
	sllx		%g7, %g5, %g7

	/* Divide by 64 to get the offset into the bitmask.  */
	srlx		%g5, 6, %g5
	sllx		%g5, 3, %g5

	/* kern_linear_pte_xor[((mask & bit) ? 1 : 0)] */
	ldx		[%g2 + %g5], %g2
	andcc		%g2, %g7, %g0
	sethi		%hi(kern_linear_pte_xor), %g5
	or		%g5, %lo(kern_linear_pte_xor), %g5
	bne,a,pt	%xcc, 1f
	 add		%g5, 8, %g5

1:	ldx		[%g5], %g2

	.globl		kvmap_linear_patch
kvmap_linear_patch:
	ba,pt		%xcc, kvmap_dtlb_tsb4m_load
	 xor		%g2, %g4, %g5

kvmap_dtlb_vmalloc_addr:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)
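	/* As in the I-TLB path: %g5 now holds the physical address of
	 * the PTE, which is loaded and validated below.
	 */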

	TSB_LOCK_TAG(%g1, %g2, %g7)

	/* Load and check PTE.  */
	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
	mov		1, %g7
	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
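	/* Same validity check as in the I-TLB vmalloc path above.  */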
	brgez,a,pn	%g5, kvmap_dtlb_longpath
	 TSB_STORE(%g1, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	/* fallthrough to TLB load */

kvmap_dtlb_load:

661:	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_DTLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are setup
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6:	TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_dtlb_load
	 mov		%g5, %g3

#ifdef CONFIG_SPARSEMEM_VMEMMAP
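	/* vmemmap addresses are translated via a table of pre-computed
	 * TTEs, one entry per 4MB: index vmemmap_table by
	 * (vaddr - VMEMMAP_BASE) >> 22 and load the TTE directly.
	 */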
kvmap_vmemmap:
	sub		%g4, %g5, %g5
	srlx		%g5, 22, %g5
	sethi		%hi(vmemmap_table), %g1
	sllx		%g5, 3, %g5
	or		%g1, %lo(vmemmap_table), %g1
	ba,pt		%xcc, kvmap_dtlb_load
	 ldx		[%g1 + %g5], %g5
#endif

kvmap_dtlb_nonlinear:
	/* Catch kernel NULL pointer derefs.  */
	sethi		%hi(PAGE_SIZE), %g5
	cmp		%g4, %g5
	bleu,pn		%xcc, kvmap_dtlb_longpath
	 nop

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* Do not use the TSB for vmemmap.  */
	mov		(VMEMMAP_BASE >> 40), %g5
	sllx		%g5, 40, %g5
	cmp		%g4,%g5
	bgeu,pn		%xcc, kvmap_vmemmap
	 nop
#endif

	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)

kvmap_dtlb_tsbmiss:
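	/* Only addresses in [MODULES_VADDR, VMALLOC_END) are handled
	 * past this point; anything outside that range is a real fault.
	 */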
	sethi		%hi(MODULES_VADDR), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_longpath
	 mov		(VMALLOC_END >> 40), %g5
	sllx		%g5, 40, %g5
	cmp		%g4, %g5
	bgeu,pn		%xcc, kvmap_dtlb_longpath
	 nop

kvmap_check_obp:
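	/* Within that range, [LOW_OBP_ADDRESS, 4GB) is handled via the
	 * OBP translations; everything else goes through the kernel
	 * page tables.
	 */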
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_obp
	 nop
	ba,pt		%xcc, kvmap_dtlb_vmalloc_addr
	 nop

kvmap_dtlb_longpath:

661:	rdpr	%pstate, %g5
	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	SET_GL(1)
	ldxa		[%g0] ASI_SCRATCHPAD, %g5
	.previous

	rdpr	%tl, %g3
	cmp	%g3, 1

661:	mov	TLB_TAG_ACCESS, %g4
	ldxa	[%g4] ASI_DMMU, %g5
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	ldx	[%g5 + HV_FAULT_D_ADDR_OFFSET], %g5
	nop
	.previous

	be,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_DTLB, %g4
	ba,pt	%xcc, winfix_trampoline
	 nop