/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
 *
 * Copyright (C) 1995, 1997, 2005, 2008 David S. Miller <davem@davemloft.net>
 * Copyright (C) 1996 Eddie C. Dost        (ecd@brainaid.de)
 * Copyright (C) 1996 Miguel de Icaza      (miguel@nuclecu.unam.mx)
 * Copyright (C) 1996,98,99 Jakub Jelinek  (jj@sunsite.mff.cuni.cz)
 */

#include <asm/head.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>

	.text
	.align		32

kvmap_itlb:
	/* g6: TAG TARGET */
	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_IMMU, %g4
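	/* %g4 now holds the I-MMU tag access register contents: the
	 * faulting virtual address with the context ID in the low bits.
	 * The kernel runs in context zero, so the context bits need no
	 * masking here.
	 */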

	/* sun4v_itlb_miss branches here with the missing virtual
	 * address already loaded into %g4
	 */
kvmap_itlb_4v:

kvmap_itlb_nonlinear:
	/* Catch kernel NULL pointer calls.  */
	sethi		%hi(PAGE_SIZE), %g5
	cmp		%g4, %g5
	bleu,pn		%xcc, kvmap_dtlb_longpath
	 nop

	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load)
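	/* The TSB probe above branches to kvmap_itlb_load on a hit with
	 * the matching TTE in %g5.  On a miss we fall through with the
	 * TSB entry pointer left in %g1 and the tag in %g6, which the
	 * refill paths below rely on.
	 */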

kvmap_itlb_tsb_miss:
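	/* Classify the miss by address range: below LOW_OBP_ADDRESS it
	 * is a vmalloc/module address, from LOW_OBP_ADDRESS up to 4GB
	 * it may be an OBP (firmware) mapping, and anything at or above
	 * 4GB is handled as a vmalloc address as well.
	 */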
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_obp
	 nop

kvmap_itlb_vmalloc_addr:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)

	KTSB_LOCK_TAG(%g1, %g2, %g7)

	/* Load and check PTE.  */
	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
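	/* A valid TTE has bit 63 set, so a non-negative %g5 means the
	 * PTE is invalid.  In that case the annulled delay slot stores
	 * the invalid-tag pattern, unlocking the TSB entry we locked
	 * above, before we bail out to the long path.  On success,
	 * KTSB_WRITE below stores the TTE and then the real tag, which
	 * clears the lock bit and publishes the entry.
	 */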
	mov		1, %g7
	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
	brgez,a,pn	%g5, kvmap_itlb_longpath
	 KTSB_STORE(%g1, %g7)

	KTSB_WRITE(%g1, %g5, %g6)

	/* fallthrough to TLB load */

kvmap_itlb_load:

661:	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_ITLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are set up
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6:	TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_itlb_load
	 mov		%g5, %g3

kvmap_itlb_longpath:

661:	rdpr	%pstate, %g5
	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	SET_GL(1)
	nop
	.previous

	rdpr	%tpc, %g5
	ba,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_ITLB, %g4

kvmap_itlb_obp:
	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath)
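	/* On a hit in the firmware translation table the matching TTE
	 * is left in %g5; %g1 still points at the TSB entry from the
	 * probe above and %g6 still holds the tag, so the entry is
	 * installed just like a vmalloc translation.  The D-TLB
	 * variant below works the same way.
	 */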

	KTSB_LOCK_TAG(%g1, %g2, %g7)

	KTSB_WRITE(%g1, %g5, %g6)

	ba,pt		%xcc, kvmap_itlb_load
	 nop

kvmap_dtlb_obp:
	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath)

	KTSB_LOCK_TAG(%g1, %g2, %g7)

	KTSB_WRITE(%g1, %g5, %g6)

	ba,pt		%xcc, kvmap_dtlb_load
	 nop

	.align		32
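
/* Install a linear-mapping TTE (computed at kvmap_linear_patch below)
 * into the 4MB kernel TSB, then jump to the common D-TLB load path.
 */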
kvmap_dtlb_tsb4m_load:
	KTSB_LOCK_TAG(%g1, %g2, %g7)
	KTSB_WRITE(%g1, %g5, %g6)
	ba,pt		%xcc, kvmap_dtlb_load
	 nop

kvmap_dtlb:
	/* %g6: TAG TARGET */
	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_DMMU, %g4

	/* sun4v_dtlb_miss branches here with the missing virtual
	 * address already loaded into %g4
	 */
kvmap_dtlb_4v:
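	/* Kernel linear addresses live at the top of the address space
	 * (PAGE_OFFSET has bit 63 set), so a non-negative address
	 * cannot be a linear mapping.
	 */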
	brgez,pn	%g4, kvmap_dtlb_nonlinear
	 nop

#ifdef CONFIG_DEBUG_PAGEALLOC
	/* Index through the base page size TSB even for linear
	 * mappings when using page allocation debugging.
	 */
	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#else
	/* Correct TAG_TARGET is already in %g6, check 4mb TSB.  */
	KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#endif
	/* TSB entry address left in %g1, lookup linear PTE.
	 * Must preserve %g1 and %g6 (TAG).
	 */
kvmap_dtlb_tsb4m_miss:
	sethi		%hi(kpte_linear_bitmap), %g2
	or		%g2, %lo(kpte_linear_bitmap), %g2

	/* Clear the PAGE_OFFSET top virtual bits, then shift
	 * down to get a 256MB physical address index.
	 */
	sllx		%g4, 21, %g5
	mov		1, %g7
	srlx		%g5, 21 + 28, %g5

	/* Don't try this at home kids... this depends upon srlx
	 * only taking the low 6 bits of the shift count in %g5.
	 */
	sllx		%g7, %g5, %g7
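	/* For example, a 256MB index of 77 yields bit 77 & 0x3f == 13
	 * in %g7, and word 77 >> 6 == 1 of the bitmap below.
	 */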

	/* Divide by 64 to get the offset into the bitmask.  */
	srlx		%g5, 6, %g5
	sllx		%g5, 3, %g5

	/* kern_linear_pte_xor[((mask & bit) ? 1 : 0)] */
	ldx		[%g2 + %g5], %g2
	andcc		%g2, %g7, %g0
	sethi		%hi(kern_linear_pte_xor), %g5
	or		%g5, %lo(kern_linear_pte_xor), %g5
	bne,a,pt	%xcc, 1f
	 add		%g5, 8, %g5

1:	ldx		[%g5], %g2
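	/* Thanks to the annulled add above, %g2 now holds
	 * kern_linear_pte_xor[0] (4MB pages) when the bitmap bit was
	 * clear, or kern_linear_pte_xor[1] (used for 256MB mappings)
	 * when it was set.
	 */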

	.globl		kvmap_linear_patch
kvmap_linear_patch:
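	/* Patch site: the boot code can rewrite this branch, e.g.
	 * nop'ing it out under CONFIG_DEBUG_PAGEALLOC so that linear
	 * addresses fall through to the page table walk below instead
	 * of using the precomputed XOR value.
	 */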
	ba,pt		%xcc, kvmap_dtlb_tsb4m_load
	 xor		%g2, %g4, %g5

kvmap_dtlb_vmalloc_addr:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)

	KTSB_LOCK_TAG(%g1, %g2, %g7)

	/* Load and check PTE.  */
	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
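	/* Same valid-bit check and unlock-on-failure dance as the
	 * I-TLB vmalloc path above.
	 */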
	mov		1, %g7
	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
	brgez,a,pn	%g5, kvmap_dtlb_longpath
	 KTSB_STORE(%g1, %g7)

	KTSB_WRITE(%g1, %g5, %g6)

	/* fallthrough to TLB load */

kvmap_dtlb_load:

661:	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_DTLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are set up
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6:	TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_dtlb_load
	 mov		%g5, %g3

#ifdef CONFIG_SPARSEMEM_VMEMMAP
kvmap_vmemmap:
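	/* %g5 holds VMEMMAP_BASE on entry.  Each vmemmap_table[] slot
	 * covers a 4MB chunk of the vmemmap region, so index by
	 * (vaddr - VMEMMAP_BASE) >> 22 and load the precomputed TTE.
	 */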
	sub		%g4, %g5, %g5
	srlx		%g5, 22, %g5
	sethi		%hi(vmemmap_table), %g1
	sllx		%g5, 3, %g5
	or		%g1, %lo(vmemmap_table), %g1
	ba,pt		%xcc, kvmap_dtlb_load
	 ldx		[%g1 + %g5], %g5
#endif

kvmap_dtlb_nonlinear:
	/* Catch kernel NULL pointer derefs.  */
	sethi		%hi(PAGE_SIZE), %g5
	cmp		%g4, %g5
	bleu,pn		%xcc, kvmap_dtlb_longpath
	 nop

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* Do not use the TSB for vmemmap.  */
	mov		(VMEMMAP_BASE >> 24), %g5
	sllx		%g5, 24, %g5
	cmp		%g4, %g5
	bgeu,pn		%xcc, kvmap_vmemmap
	 nop
#endif

	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)

kvmap_dtlb_tsbmiss:
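	/* Anything below MODULES_VADDR or at/above VMALLOC_END cannot
	 * be a valid kernel nonlinear address, so fault immediately.
	 */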
	sethi		%hi(MODULES_VADDR), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_longpath
	 mov		(VMALLOC_END >> 24), %g5
	sllx		%g5, 24, %g5
	cmp		%g4, %g5
	bgeu,pn		%xcc, kvmap_dtlb_longpath
	 nop

kvmap_check_obp:
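	/* The OBP firmware window is [LOW_OBP_ADDRESS, 4GB); addresses
	 * on either side of it go to the kernel page table walk.
	 */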
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_obp
	 nop
	ba,pt		%xcc, kvmap_dtlb_vmalloc_addr
	 nop

kvmap_dtlb_longpath:

661:	rdpr	%pstate, %g5
	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	SET_GL(1)
	ldxa		[%g0] ASI_SCRATCHPAD, %g5
	.previous
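	/* On sun4v, %g5 now points at the per-cpu fault status area,
	 * and the patched load below fetches the faulting address from
	 * it instead of from the tag access register.
	 */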

	rdpr	%tl, %g3
	cmp	%g3, 1
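	/* The be,pt below sends misses taken at TL=1 to the common
	 * fault handler; at TL>1 we interrupted a window spill/fill
	 * handler and must divert through the winfix trampoline.
	 */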

661:	mov	TLB_TAG_ACCESS, %g4
	ldxa	[%g4] ASI_DMMU, %g5
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	ldx	[%g5 + HV_FAULT_D_ADDR_OFFSET], %g5
	nop
	.previous

	be,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_DTLB, %g4
	ba,pt	%xcc, winfix_trampoline
	 nop