/*
 * Low-level SLB routines
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 *
 * Based on earlier C version:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/firmware.h>

/* void slb_allocate_realmode(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 * 	r3 = faulting address, r13 = PACA
 *	r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 */
_GLOBAL(slb_allocate_realmode)
	/*
	 * check for bad kernel/user address
	 * (ea & ~REGION_MASK) >= PGTABLE_RANGE
	 */
	rldicr. r9,r3,4,(63 - PGTABLE_EADDR_SIZE - 4)
	bne-	8f
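
	/*
	 * A sketch of the check above, assuming standard rldicr.
	 * semantics: the EA is rotated left by 4 so the region nibble
	 * wraps to the bottom, and the mask keeps exactly the bits
	 * between the region field and the 2^PGTABLE_EADDR_SIZE
	 * page-table range. Any nonzero bit there means the EA can
	 * never have a translation, hence the BAD_VSID exit at 8f.
	 */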

	srdi	r9,r3,60		/* get region */
	srdi	r10,r3,SID_SHIFT	/* get esid */
	cmpldi	cr7,r9,0xc		/* cmp PAGE_OFFSET for later use */

	/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
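	/*
	 * Worked example (values chosen for illustration): for
	 * ea = 0xd000000000012345, r9 = ea >> 60 = 0xd (the vmalloc
	 * region), r10 = ea >> SID_SHIFT = 0xd00000000 (the 256MB
	 * segment number), and cr7 compares 0xd against 0xc, steering
	 * us to the kernel, non-linear path below.
	 */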
	blt	cr7,0f			/* user or kernel? */

	/* kernel address: proto-VSID = ESID */
	/* WARNING - MAGIC: we don't use the VSID 0xfffffffff, but
	 * this code will generate the protoVSID 0xfffffffff for the
	 * top segment.  That's ok, the scramble below will translate
	 * it to VSID 0, which is reserved as a bad VSID - one which
	 * will never have any pages in it.  */

	/* Check if hitting the linear mapping or some other kernel space */
	bne	cr7,1f

	/* Linear mapping encoding bits, the "li" instruction below will
	 * be patched by the kernel at boot
	 */
.globl slb_miss_kernel_load_linear
slb_miss_kernel_load_linear:
	li	r11,0
	/*
	 * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
	 * r9 = region id.
	 */
	addis	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
	addi	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
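	/*
	 * Illustrative arithmetic: r9 arrived holding the region id,
	 * so the two adds leave r9 = region + MAX_USER_CONTEXT - 0xc + 1,
	 * i.e. MAX_USER_CONTEXT + 1 for the linear mapping (0xc),
	 * MAX_USER_CONTEXT + 2 for vmalloc (0xd), and so on: each
	 * kernel region gets a context id just above the user range.
	 */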

BEGIN_FTR_SECTION
	b	slb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
	b	slb_finish_load_1T

1:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* Check virtual memmap region. To be patched at kernel boot */
	cmpldi	cr0,r9,0xf
	bne	1f
.globl slb_miss_kernel_load_vmemmap
slb_miss_kernel_load_vmemmap:
	li	r11,0
	b	6f
1:
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

	/* vmalloc mapping gets the encoding from the PACA as the mapping
	 * can be demoted from 64K -> 4K dynamically on some machines
	 */
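	/*
	 * The low 16 bits of the ESID are the 256MB segment number
	 * within the region; anything beyond the last vmalloc segment,
	 * (VMALLOC_SIZE >> 28) - 1, is taken to be the IO mapping and
	 * handled at 5: below.
	 */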
	clrldi	r11,r10,48
	cmpldi	r11,(VMALLOC_SIZE >> 28) - 1
	bgt	5f
	lhz	r11,PACAVMALLOCSLLP(r13)
	b	6f
5:
	/* IO mapping */
.globl slb_miss_kernel_load_io
slb_miss_kernel_load_io:
	li	r11,0
6:
	/*
	 * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
	 * r9 = region id.
	 */
	addis	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
	addi	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l

BEGIN_FTR_SECTION
	b	slb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
	b	slb_finish_load_1T

0:
	/* when using slices, we extract the psize off the slice bitmaps
	 * and then we need to get the sllp encoding off the mmu_psize_defs
	 * array.
	 *
	 * XXX This is a bit inefficient especially for the normal case,
	 * so we should try to implement a fast path for the standard page
	 * size using the old sllp value so we avoid the array. We cannot
	 * really do dynamic patching unfortunately as processes might flip
	 * between 4k and 64k standard page size
	 */
#ifdef CONFIG_PPC_MM_SLICES
	/* r10 has the esid */
	cmpldi	r10,16
	/* below SLICE_LOW_TOP */
	blt	5f
	/*
	 * Handle hpsizes,
	 * r9 is get_paca()->context.high_slices_psize[index], r11 is mask_index
	 */
	srdi    r11,r10,(SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT + 1) /* index */
	addi	r9,r11,PACAHIGHSLICEPSIZE
	lbzx	r9,r13,r9		/* r9 is hpsizes[r11] */
	/* r11 = (r10 >> (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)) & 0x1 */
	rldicl	r11,r10,(64 - (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)),63
	b	6f
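	/*
	 * A sketch of the lookup above, assuming hpsizes[] packs two
	 * 4-bit page-size fields per byte: the srdi turns the ESID into
	 * a byte index (each byte covers two high slices), lbzx fetches
	 * that byte, and the rldicl keeps the selector bit that 6:
	 * below turns into a nibble shift of 0 or 4.
	 */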

5:
	/*
	 * Handle lpsizes
	 * r9 is get_paca()->context.low_slices_psize, r11 is index
	 */
	ld	r9,PACALOWSLICESPSIZE(r13)
	mr	r11,r10
6:
	sldi	r11,r11,2  /* index * 4 */
	/* Extract the psize and multiply to get an array offset */
	srd	r9,r9,r11
	andi.	r9,r9,0xf
	mulli	r9,r9,MMUPSIZEDEFSIZE
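	/*
	 * r9 now holds psize * sizeof(struct mmu_psize_def): the 4-bit
	 * page-size index extracted from the slice field, scaled into a
	 * byte offset into the mmu_psize_defs[] array fetched via the
	 * TOC below.
	 */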

	/* Now get to the array and obtain the sllp
	 */
	ld	r11,PACATOC(r13)
	ld	r11,mmu_psize_defs@got(r11)
	add	r11,r11,r9
	ld	r11,MMUPSIZESLLP(r11)
	ori	r11,r11,SLB_VSID_USER
#else
	/* paca context sllp already contains the SLB_VSID_USER bits */
	lhz	r11,PACACONTEXTSLLP(r13)
#endif /* CONFIG_PPC_MM_SLICES */

	ld	r9,PACACONTEXTID(r13)
BEGIN_FTR_SECTION
	cmpldi	r10,0x1000
	bge	slb_finish_load_1T
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
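	/*
	 * 0x1000 256MB segments is exactly 1TB, so on CPUs that
	 * advertise 1T segments any user address at or above 1TB is
	 * mapped with a 1T segment instead.
	 */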
	b	slb_finish_load

8:	/* invalid EA */
	li	r10,0			/* BAD_VSID */
	li	r9,0			/* BAD_VSID */
	li	r11,SLB_VSID_USER	/* flags don't much matter */
	b	slb_finish_load

/*
 * Finish loading of an SLB entry and return
 *
 * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
 */
slb_finish_load:
	rldimi  r10,r9,ESID_BITS,0
	ASM_VSID_SCRAMBLE(r10,r9,256M)
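	/*
	 * Conceptually (a sketch, not the literal instruction
	 * sequence): the rldimi above built the proto-VSID as
	 * (context << ESID_BITS) | esid, and the scramble computes
	 * vsid = (proto_vsid * VSID_MULTIPLIER_256M) % VSID_MODULUS_256M,
	 * a full-period multiplicative hash that spreads consecutive
	 * segments across the hash space.
	 */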
	/*
	 * bits above VSID_BITS_256M need to be ignored from r10
	 * also combine VSID and flags
	 */
	rldimi	r11,r10,SLB_VSID_SHIFT,(64 - (SLB_VSID_SHIFT + VSID_BITS_256M))

	/* r3 = EA, r11 = VSID data */
	/*
	 * Find a slot, round robin. Previously we tried to find a
	 * free slot first but that took too long. Unfortunately we
	 * don't have any LRU information to help us choose a slot.
	 */

7:	ld	r10,PACASTABRR(r13)
	addi	r10,r10,1
	/* This gets soft patched on boot. */
.globl slb_compare_rr_to_size
slb_compare_rr_to_size:
	cmpldi	r10,0

	blt+	4f
	li	r10,SLB_NUM_BOLTED

4:
	std	r10,PACASTABRR(r13)
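	/*
	 * Round-robin victim selection: PACASTABRR remembers the next
	 * slot to cast out. The cmpldi immediate above is patched at
	 * boot with the real number of SLB entries, so when the counter
	 * reaches it we wrap back to SLB_NUM_BOLTED, skipping the
	 * bolted entries that must never be evicted.
	 */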

3:
	rldimi	r3,r10,0,36		/* r3 = EA[0:35] | entry */
	oris	r10,r3,SLB_ESID_V@h	/* r10 = r3 | SLB_ESID_V */

	/* r10 = ESID data, r11 = VSID data */

	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 */
	slbmte	r11,r10

	/* we're done for kernel addresses */
	crclr	4*cr0+eq		/* set result to "success" */
	bgelr	cr7

	/* Update the slb cache */
	lhz	r3,PACASLBCACHEPTR(r13)	/* offset = paca->slb_cache_ptr */
	cmpldi	r3,SLB_CACHE_ENTRIES
	bge	1f

	/* still room in the slb cache */
	sldi	r11,r3,2		/* r11 = offset * sizeof(u32) */
	srdi    r10,r10,28		/* get the 36 bits of the ESID */
	add	r11,r11,r13		/* r11 = (u32 *)paca + offset */
	stw	r10,PACASLBCACHE(r11)	/* paca->slb_cache[offset] = esid */
	addi	r3,r3,1			/* offset++ */
	b	2f
1:					/* offset >= SLB_CACHE_ENTRIES */
	li	r3,SLB_CACHE_ENTRIES+1
2:
	sth	r3,PACASLBCACHEPTR(r13)	/* paca->slb_cache_ptr = offset */
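	/*
	 * The cache trades completeness for speed: on context switch
	 * the flush code need only invalidate the user ESIDs recorded
	 * here. The SLB_CACHE_ENTRIES+1 sentinel stored on overflow
	 * says the cache is unusable and a full flush is required.
	 */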
	crclr	4*cr0+eq		/* set result to "success" */
	blr

/*
 * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
 *
 * r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9
 */
slb_finish_load_1T:
	srdi	r10,r10,(SID_SHIFT_1T - SID_SHIFT)	/* get 1T ESID */
	rldimi  r10,r9,ESID_BITS_1T,0
	ASM_VSID_SCRAMBLE(r10,r9,1T)
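	/*
	 * Same scheme as the 256M case: the proto-VSID here is
	 * (context << ESID_BITS_1T) | (esid >> 12), the srdi above
	 * having folded 4096 256MB segments into each 1T segment
	 * (SID_SHIFT_1T - SID_SHIFT = 12 bits).
	 */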
	/*
	 * bits above VSID_BITS_1T need to be ignored from r10
	 * also combine VSID and flags
	 */
	rldimi	r11,r10,SLB_VSID_SHIFT_1T,(64 - (SLB_VSID_SHIFT_1T + VSID_BITS_1T))
	li	r10,MMU_SEGSIZE_1T
	rldimi	r11,r10,SLB_VSID_SSIZE_SHIFT,0	/* insert segment size */
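	/*
	 * The B field just inserted at SLB_VSID_SSIZE_SHIFT (the top
	 * of the VSID word) is what tells the hardware this entry maps
	 * a 1T segment rather than a 256MB one.
	 */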

	/* r3 = EA, r11 = VSID data */
	clrrdi	r3,r3,SID_SHIFT_1T	/* clear out non-ESID bits */
	b	7b