/*
 * arch/ppc64/mm/slb_low.S
 *
 * Low-level SLB routines
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 *
 * Based on earlier C version:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>

/* void slb_allocate_realmode(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 * 	r3 = faulting address, r13 = PACA
 *	r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 */
_GLOBAL(slb_allocate_realmode)
	/* r3 = faulting address */

	srdi	r9,r3,60		/* get region */
	srdi	r10,r3,28		/* get esid */
	cmpldi	cr7,r9,0xc		/* cmp KERNELBASE for later use */

	/* r3 = address, r10 = esid, cr7 = <>KERNELBASE */
	blt	cr7,0f			/* user or kernel? */

	/* kernel address: proto-VSID = ESID */
	/* WARNING - MAGIC: we don't use the VSID 0xfffffffff, but
	 * this code will generate the protoVSID 0xfffffffff for the
	 * top segment.  That's ok, the scramble below will translate
	 * it to VSID 0, which is reserved as a bad VSID - one which
	 * will never have any pages in it.  */
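	/*
	 * (Worked through, assuming the usual scramble constants from
	 * asm/mmu.h: VSID_MODULUS = 2^36 - 1 and the scramble computes
	 * (proto_vsid * VSID_MULTIPLIER) % VSID_MODULUS.  Since
	 * 0xfffffffff == 2^36 - 1 == 0 (mod VSID_MODULUS), the top
	 * segment's proto-VSID does indeed scramble to VSID 0.)
	 */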

	/* Check if hitting the linear mapping of the vmalloc/ioremap
	 * kernel space
	 */
	bne	cr7,1f

	/* Linear mapping encoding bits, the "li" instruction below will
	 * be patched by the kernel at boot
	 */
_GLOBAL(slb_miss_kernel_load_linear)
	li	r11,0
	b	slb_finish_load

1:	/* vmalloc/ioremap mapping encoding bits, the "li" instruction below
	 * will be patched by the kernel at boot
	 */
_GLOBAL(slb_miss_kernel_load_virtual)
	li	r11,0
	b	slb_finish_load


0:	/* user address: proto-VSID = context << 15 | ESID. First check
	 * if the address is within the boundaries of the user region
	 */
	srdi.	r9,r10,USER_ESID_BITS
	bne-	8f			/* invalid ea bits set */
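	/*
	 * The check above, in rough C (a sketch):
	 *
	 *	if (esid >> USER_ESID_BITS)
	 *		goto bad_address;	EA bits above the user range
	 */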

	/* Figure out if the segment contains huge pages */
#ifdef CONFIG_HUGETLB_PAGE
BEGIN_FTR_SECTION
	b	1f
END_FTR_SECTION_IFCLR(CPU_FTR_16M_PAGE)
	cmpldi	r10,16

	lhz	r9,PACALOWHTLBAREAS(r13)
	mr	r11,r10
	blt	5f

	lhz	r9,PACAHIGHHTLBAREAS(r13)
	srdi	r11,r10,(HTLB_AREA_SHIFT-SID_SHIFT)

5:	srd	r9,r9,r11
	andi.	r9,r9,1
	beq	1f
_GLOBAL(slb_miss_user_load_huge)
	li	r11,0
	b	2f
1:
#endif /* CONFIG_HUGETLB_PAGE */
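	/*
	 * Rough C equivalent of the huge-page test above (a sketch;
	 * the field names are inferred from the PACA offset symbols):
	 *
	 *	if (esid < 16)
	 *		huge = (paca->context.low_htlb_areas >> esid) & 1;
	 *	else
	 *		huge = (paca->context.high_htlb_areas >>
	 *			(esid >> (HTLB_AREA_SHIFT - SID_SHIFT))) & 1;
	 */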

_GLOBAL(slb_miss_user_load_normal)
	li	r11,0

2:
	ld	r9,PACACONTEXTID(r13)
	rldimi	r10,r9,USER_ESID_BITS,0
	b	slb_finish_load
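	/*
	 * i.e., in rough C (a sketch; the field name is inferred from
	 * the PACACONTEXTID offset symbol):
	 *
	 *	proto_vsid = (paca->context_id << USER_ESID_BITS) | esid;
	 */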

8:	/* invalid EA */
	li	r10,0			/* BAD_VSID */
	li	r11,SLB_VSID_USER	/* flags don't much matter */
	b	slb_finish_load

#ifdef __DISABLED__

/* void slb_allocate_user(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 * 	r3 = faulting address, r13 = PACA
 *	r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 *
 * It is called with translation enabled in order to be able to walk the
 * page tables. This is not currently used.
 */
_GLOBAL(slb_allocate_user)
	/* r3 = faulting address */
	srdi	r10,r3,28		/* get esid */

	crset	4*cr7+lt		/* set "user" flag for later */

	/* check if we fit in the range covered by the pagetables */
	srdi.	r9,r3,PGTABLE_EADDR_SIZE
	crnot	4*cr0+eq,4*cr0+eq
	beqlr

	/* now we need to get to the page tables in order to get the page
	 * size encoding from the PMD. In the future, we'll be able to deal
	 * with 1T segments too by getting the encoding from the PGD instead
	 */
	ld	r9,PACAPGDIR(r13)
	cmpldi	cr0,r9,0
	beqlr
	rlwinm	r11,r10,8,25,28
	ldx	r9,r9,r11		/* get pgd_t */
	cmpldi	cr0,r9,0
	beqlr
	rlwinm	r11,r10,3,17,28
	ldx	r9,r9,r11		/* get pmd_t */
	cmpldi	cr0,r9,0
	beqlr
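	/*
	 * The walk above, roughly, in C (a sketch; the rlwinm masks
	 * encode the byte offsets that pgd_index()/pmd_index() would
	 * compute):
	 *
	 *	pgd = paca->pgdir;			from PACAPGDIR
	 *	if (!pgd) return;
	 *	pgd_entry = pgd[pgd_index(ea)];
	 *	if (!pgd_entry) return;
	 *	pmd_entry = ((pmd_t *)pgd_entry)[pmd_index(ea)];
	 *	if (!pmd_entry) return;
	 */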

	/* build vsid flags */
	andi.	r11,r9,SLB_VSID_LLP
	ori	r11,r11,SLB_VSID_USER

	/* get context to calculate proto-VSID */
	ld	r9,PACACONTEXTID(r13)
	rldimi	r10,r9,USER_ESID_BITS,0

	/* fall through slb_finish_load */

#endif /* __DISABLED__ */


/*
 * Finish loading of an SLB entry and return
 *
 * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <>KERNELBASE
 */
slb_finish_load:
	ASM_VSID_SCRAMBLE(r10,r9)
	rldimi	r11,r10,SLB_VSID_SHIFT,16	/* combine VSID and flags */
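	/*
	 * ASM_VSID_SCRAMBLE implements, in effect (a sketch, assuming
	 * the usual constants from asm/mmu.h):
	 *
	 *	vsid = (proto_vsid * VSID_MULTIPLIER) % VSID_MODULUS;
	 *
	 * and the rldimi above then packs:
	 *
	 *	vsid_data = (vsid << SLB_VSID_SHIFT) | flags;
	 */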

	/* r3 = EA, r11 = VSID data */
	/*
	 * Find a slot, round robin. Previously we tried to find a
	 * free slot first but that took too long. Unfortunately we
	 * don't have any LRU information to help us choose a slot.
	 */
#ifdef CONFIG_PPC_ISERIES
	/*
	 * On iSeries, the "bolted" stack segment can be cast out on
	 * shared processor switch so we need to check for a miss on
	 * it and restore it to the right slot.
	 */
	ld	r9,PACAKSAVE(r13)
	clrrdi	r9,r9,28
	clrrdi	r3,r3,28
	li	r10,SLB_NUM_BOLTED-1	/* Stack goes in last bolted slot */
	cmpld	r9,r3
	beq	3f
#endif /* CONFIG_PPC_ISERIES */
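	/*
	 * In rough C (a sketch; the kernel stack pointer comes from
	 * PACAKSAVE, and SID_SHIFT is the 28-bit segment shift):
	 *
	 *	if ((paca->kstack >> SID_SHIFT) == (ea >> SID_SHIFT))
	 *		entry = SLB_NUM_BOLTED - 1;	reuse the bolted slot
	 */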

	ld	r10,PACASTABRR(r13)
	addi	r10,r10,1
	/* use a cpu feature mask if we ever change our slb size */
	cmpldi	r10,SLB_NUM_ENTRIES

	blt+	4f
	li	r10,SLB_NUM_BOLTED

4:
	std	r10,PACASTABRR(r13)
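	/*
	 * Round-robin selection, in rough C (a sketch; the field name
	 * is inferred from the PACASTABRR offset symbol):
	 *
	 *	entry = paca->stab_rr + 1;
	 *	if (entry >= SLB_NUM_ENTRIES)
	 *		entry = SLB_NUM_BOLTED;	   bolted slots are never victims
	 *	paca->stab_rr = entry;
	 */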

3:
	rldimi	r3,r10,0,36		/* r3 = EA[0:35] | entry */
	oris	r10,r3,SLB_ESID_V@h	/* r10 = r3 | SLB_ESID_V */

	/* r10 = ESID data, r11 = VSID data */
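	/*
	 * i.e., in rough C (a sketch):
	 *
	 *	esid_data = (ea & ESID_MASK) | SLB_ESID_V | entry;
	 */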

	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 */
	slbmte	r11,r10

	/* we're done for kernel addresses */
	crclr	4*cr0+eq		/* set result to "success" */
	bgelr	cr7

	/* Update the slb cache */
	lhz	r3,PACASLBCACHEPTR(r13)	/* offset = paca->slb_cache_ptr */
	cmpldi	r3,SLB_CACHE_ENTRIES
	bge	1f

	/* still room in the slb cache */
	sldi	r11,r3,1		/* r11 = offset * sizeof(u16) */
	rldicl	r10,r10,36,28		/* get low 16 bits of the ESID */
	add	r11,r11,r13		/* r11 = (u16 *)paca + offset */
	sth	r10,PACASLBCACHE(r11)	/* paca->slb_cache[offset] = esid */
	addi	r3,r3,1			/* offset++ */
	b	2f
1:					/* offset >= SLB_CACHE_ENTRIES */
	li	r3,SLB_CACHE_ENTRIES+1
2:
	sth	r3,PACASLBCACHEPTR(r13)	/* paca->slb_cache_ptr = offset */
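	/*
	 * The sequence above, in rough C (a sketch; field names are
	 * inferred from the PACA offset symbols):
	 *
	 *	if (paca->slb_cache_ptr < SLB_CACHE_ENTRIES)
	 *		paca->slb_cache[paca->slb_cache_ptr++] = esid;
	 *	else
	 *		paca->slb_cache_ptr = SLB_CACHE_ENTRIES + 1;
	 *
	 * An out-of-range ptr tells the SLB flush path to cast out
	 * all entries instead of walking the cache.
	 */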
	crclr	4*cr0+eq		/* set result to "success" */
	blr