/*
 *  linux/arch/arm/mm/proc-v7.S
 *
 *  Copyright (C) 2001 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This is the "shell" of the ARMv7 processor support.
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>

#include "proc-macros.S"

#define TTB_S		(1 << 1)
#define TTB_RGN_NC	(0 << 3)
#define TTB_RGN_OC_WBWA	(1 << 3)
#define TTB_RGN_OC_WT	(2 << 3)
#define TTB_RGN_OC_WB	(3 << 3)
#define TTB_NOS		(1 << 5)
#define TTB_IRGN_NC	((0 << 0) | (0 << 6))
#define TTB_IRGN_WBWA	((0 << 0) | (1 << 6))
#define TTB_IRGN_WT	((1 << 0) | (0 << 6))
#define TTB_IRGN_WB	((1 << 0) | (1 << 6))
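
/*
 * With the multiprocessing extensions the inner-region attribute is a
 * split field: IRGN[0] lives in TTBR bit 6 and IRGN[1] in bit 0, hence
 * the two-part encodings above (e.g. write-through is IRGN = 0b10,
 * i.e. bit 0 set, bit 6 clear).
 */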

/* PTWs cacheable, inner WB not shareable, outer WB not shareable */
#define TTB_FLAGS_UP	TTB_IRGN_WB|TTB_RGN_OC_WB
#define PMD_FLAGS_UP	PMD_SECT_WB

/* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */
#define TTB_FLAGS_SMP	TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
#define PMD_FLAGS_SMP	PMD_SECT_WBWA|PMD_SECT_S
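
/*
 * For illustration: a 16KB-aligned translation table leaves the low
 * TTBR bits free for attributes, and the definitions above work out
 * to TTB_FLAGS_UP = 0x59 and TTB_FLAGS_SMP = 0x6a.
 */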

ENTRY(cpu_v7_proc_init)
	mov	pc, lr
ENDPROC(cpu_v7_proc_init)

ENTRY(cpu_v7_proc_fin)
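	/*
	 * Called when the CPU is being shut down: clear the I-cache
	 * (SCTLR bit 12), D-cache (bit 2) and alignment-check (bit 1)
	 * enables, leaving the core close to its post-reset state.
	 */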
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............
	bic	r0, r0, #0x0006			@ .............ca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	mov	pc, lr
ENDPROC(cpu_v7_proc_fin)

/*
 *	cpu_v7_reset(loc)
 *
 *	Perform a soft reset of the system.  Put the CPU into the
 *	same state as it would be if it had been reset, and branch
 *	to what would be the reset vector.
 *
 *	- loc   - location to jump to for soft reset
 */
	.align	5
ENTRY(cpu_v7_reset)
	mov	pc, r0
ENDPROC(cpu_v7_reset)

/*
 *	cpu_v7_do_idle()
 *
 *	Idle the processor (eg, wait for interrupt).
 *
 *	IRQs are already disabled.
 */
ENTRY(cpu_v7_do_idle)
	dsb					@ WFI may enter a low-power mode
	wfi
	mov	pc, lr
ENDPROC(cpu_v7_do_idle)
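
/*
 *	cpu_v7_dcache_clean_area(addr, size)
 *
 *	Clean D-cache lines by MVA over [addr, addr + size), then
 *	barrier.  This is presumably here so that newly written page
 *	tables reach a point where the hardware table walker can see
 *	them; when the walker can read from the L1 cache
 *	(TLB_CAN_READ_FROM_L1_CACHE) the routine reduces to a return.
 */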

ENTRY(cpu_v7_dcache_clean_area)
#ifndef TLB_CAN_READ_FROM_L1_CACHE
	dcache_line_size r2, r3
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, r2
	subs	r1, r1, r2
	bhi	1b
	dsb
#endif
	mov	pc, lr
ENDPROC(cpu_v7_dcache_clean_area)

/*
 *	cpu_v7_switch_mm(pgd_phys, mm)
 *
 *	Set the translation table base pointer to be pgd_phys
 *
 *	- pgd_phys - physical address of new TTB
 *	- mm       - mm_struct whose context.id supplies the new ASID
 *
 *	It is assumed that:
 *	- we are not using split page tables
 */
ENTRY(cpu_v7_switch_mm)
#ifdef CONFIG_MMU
	mov	r2, #0
	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
	ALT_SMP(orr	r0, r0, #TTB_FLAGS_SMP)
	ALT_UP(orr	r0, r0, #TTB_FLAGS_UP)
#ifdef CONFIG_ARM_ERRATA_430973
	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
#endif
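	/*
	 * The switch is staged through the reserved (zero) ASID so
	 * that speculative table walks can never pair the old
	 * translation table with the new ASID, or vice versa, while
	 * both are being updated.
	 */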
	mcr	p15, 0, r2, c13, c0, 1		@ set reserved context ID
	isb
1:	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
	isb
	mcr	p15, 0, r1, c13, c0, 1		@ set context ID
	isb
#endif
	mov	pc, lr
ENDPROC(cpu_v7_switch_mm)

/*
 *	cpu_v7_set_pte_ext(ptep, pte, ext)
 *
 *	Set a level 2 translation table entry.
 *
 *	- ptep  - pointer to level 2 translation table entry
 *		  (hardware version is stored at +2048 bytes)
 *	- pte   - PTE value to store
 *	- ext	- value for extended PTE bits
 */
ENTRY(cpu_v7_set_pte_ext)
#ifdef CONFIG_MMU
	str	r1, [r0]			@ linux version

	bic	r3, r1, #0x000003f0
	bic	r3, r3, #PTE_TYPE_MASK
	orr	r3, r3, r2
	orr	r3, r3, #PTE_EXT_AP0 | 2

	tst	r1, #1 << 4
	orrne	r3, r3, #PTE_EXT_TEX(1)
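
	/*
	 * Software dirty-bit emulation: a writable-but-clean pte is
	 * made read-only in the hardware copy (APX set), so the first
	 * write faults and lets the kernel mark the page dirty.
	 */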

	eor	r1, r1, #L_PTE_DIRTY
	tst	r1, #L_PTE_RDONLY | L_PTE_DIRTY
	orrne	r3, r3, #PTE_EXT_APX

	tst	r1, #L_PTE_USER
	orrne	r3, r3, #PTE_EXT_AP1
	tstne	r3, #PTE_EXT_APX
	bicne	r3, r3, #PTE_EXT_APX | PTE_EXT_AP0

	tst	r1, #L_PTE_XN
	orrne	r3, r3, #PTE_EXT_XN
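
	/*
	 * Likewise for the young bit: unless the pte is both young
	 * and present, the hardware entry is zeroed so the next
	 * access faults and the kernel can mark the page young.
	 */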

	tst	r1, #L_PTE_YOUNG
	tstne	r1, #L_PTE_PRESENT
	moveq	r3, #0

	str	r3, [r0, #2048]!
	mcr	p15, 0, r0, c7, c10, 1		@ flush_pte
#endif
	mov	pc, lr
ENDPROC(cpu_v7_set_pte_ext)

cpu_v7_name:
	.ascii	"ARMv7 Processor"
	.align

	__CPUINIT

/*
 *	__v7_setup
 *
 *	Initialise TLB, Caches, and MMU state ready to switch the MMU
 *	on.  Return in r0 the new CP15 C1 control register setting.
 *
 *	We automatically detect if we have a Harvard cache, and use the
 *	Harvard cache control instructions instead of the unified cache
 *	control instructions.
 *
 *	This should be able to cover all ARMv7 cores.
 *
 *	It is assumed that:
 *	- cache type register is implemented
 */
__v7_ca9mp_setup:
#ifdef CONFIG_SMP
	ALT_SMP(mrc	p15, 0, r0, c1, c0, 1)
	ALT_UP(mov	r0, #(1 << 6))		@ fake it for UP
	tst	r0, #(1 << 6)			@ SMP/nAMP mode enabled?
	orreq	r0, r0, #(1 << 6) | (1 << 0)	@ Enable SMP/nAMP mode and
	mcreq	p15, 0, r0, c1, c0, 1		@ TLB ops broadcasting
#endif
__v7_setup:
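	/*
	 * No stack is available this early, so registers clobbered by
	 * v7_flush_dcache_all are spilled to a static buffer instead.
	 */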
	adr	r12, __v7_setup_stack		@ the local stack
	stmia	r12, {r0-r5, r7, r9, r11, lr}
	bl	v7_flush_dcache_all
	ldmia	r12, {r0-r5, r7, r9, r11, lr}

	mrc	p15, 0, r0, c0, c0, 0		@ read main ID register
	and	r10, r0, #0xff000000		@ ARM?
	teq	r10, #0x41000000
	bne	3f
	and	r5, r0, #0x00f00000		@ variant
	and	r6, r0, #0x0000000f		@ revision
	orr	r6, r6, r5, lsr #20-4		@ combine variant and revision
	ubfx	r0, r0, #4, #12			@ primary part number
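	@ e.g. a Cortex-A8 r1p3 MIDR of 0x411fc083 yields r6 = 0x13
	@ (variant << 4 | revision) and r0 = 0xc08 (part number)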

	/* Cortex-A8 Errata */
	ldr	r10, =0x00000c08		@ Cortex-A8 primary part number
	teq	r0, r10
	bne	2f
#ifdef CONFIG_ARM_ERRATA_430973
	teq	r5, #0x00100000			@ only present in r1p*
	mrceq	p15, 0, r10, c1, c0, 1		@ read aux control register
	orreq	r10, r10, #(1 << 6)		@ set IBE to 1
	mcreq	p15, 0, r10, c1, c0, 1		@ write aux control register
#endif
#ifdef CONFIG_ARM_ERRATA_458693
	teq	r6, #0x20			@ only present in r2p0
	mrceq	p15, 0, r10, c1, c0, 1		@ read aux control register
	orreq	r10, r10, #(1 << 5)		@ set L1NEON to 1
	orreq	r10, r10, #(1 << 9)		@ set PLDNOP to 1
	mcreq	p15, 0, r10, c1, c0, 1		@ write aux control register
#endif
#ifdef CONFIG_ARM_ERRATA_460075
	teq	r6, #0x20			@ only present in r2p0
	mrceq	p15, 1, r10, c9, c0, 2		@ read L2 cache aux ctrl register
	tsteq	r10, #1 << 22
	orreq	r10, r10, #(1 << 22)		@ set the Write Allocate disable bit
	mcreq	p15, 1, r10, c9, c0, 2		@ write the L2 cache aux ctrl register
#endif
	b	3f

	/* Cortex-A9 Errata */
2:	ldr	r10, =0x00000c09		@ Cortex-A9 primary part number
	teq	r0, r10
	bne	3f
#ifdef CONFIG_ARM_ERRATA_742230
	cmp	r6, #0x22			@ only present up to r2p2
	mrcle	p15, 0, r10, c15, c0, 1		@ read diagnostic register
	orrle	r10, r10, #1 << 4		@ set bit #4
	mcrle	p15, 0, r10, c15, c0, 1		@ write diagnostic register
#endif
#ifdef CONFIG_ARM_ERRATA_742231
	teq	r6, #0x20			@ present in r2p0
	teqne	r6, #0x21			@ present in r2p1
	teqne	r6, #0x22			@ present in r2p2
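	@ the teq/teqne chain leaves Z set iff r6 matches any of the three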
	mrceq	p15, 0, r10, c15, c0, 1		@ read diagnostic register
	orreq	r10, r10, #1 << 12		@ set bit #12
	orreq	r10, r10, #1 << 22		@ set bit #22
	mcreq	p15, 0, r10, c15, c0, 1		@ write diagnostic register
#endif
#ifdef CONFIG_ARM_ERRATA_743622
	teq	r6, #0x20			@ present in r2p0
	teqne	r6, #0x21			@ present in r2p1
	teqne	r6, #0x22			@ present in r2p2
	mrceq	p15, 0, r10, c15, c0, 1		@ read diagnostic register
	orreq	r10, r10, #1 << 6		@ set bit #6
	mcreq	p15, 0, r10, c15, c0, 1		@ write diagnostic register
#endif

3:	mov	r10, #0
#ifdef HARVARD_CACHE
	mcr	p15, 0, r10, c7, c5, 0		@ I+BTB cache invalidate
#endif
	dsb
#ifdef CONFIG_MMU
	mcr	p15, 0, r10, c8, c7, 0		@ invalidate I + D TLBs
	mcr	p15, 0, r10, c2, c0, 2		@ TTB control register
	ALT_SMP(orr	r4, r4, #TTB_FLAGS_SMP)
	ALT_UP(orr	r4, r4, #TTB_FLAGS_UP)
	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
	mov	r10, #0x1f			@ domains 0, 1 = manager
	mcr	p15, 0, r10, c3, c0, 0		@ load domain access register
	/*
	 * Memory region attributes with SCTLR.TRE=1
	 *
	 *   n = TEX[0],C,B
	 *   TR = PRRR[2n+1:2n]		- memory type
	 *   IR = NMRR[2n+1:2n]		- inner cacheable property
	 *   OR = NMRR[2n+17:2n+16]	- outer cacheable property
	 *
	 *			n	TR	IR	OR
	 *   UNCACHED		000	00
	 *   BUFFERABLE		001	10	00	00
	 *   WRITETHROUGH	010	10	10	10
	 *   WRITEBACK		011	10	11	11
	 *   reserved		110
	 *   WRITEALLOC		111	10	01	01
	 *   DEV_SHARED		100	01
	 *   DEV_NONSHARED	100	01
	 *   DEV_WC		001	10
	 *   DEV_CACHED		011	10
	 *
	 * Other attributes:
	 *
	 *   DS0 = PRRR[16] = 0		- device shareable property
	 *   DS1 = PRRR[17] = 1		- device shareable property
	 *   NS0 = PRRR[18] = 0		- normal shareable property
	 *   NS1 = PRRR[19] = 1		- normal shareable property
	 *   NOS = PRRR[24+n] = 1	- not outer shareable
	 */
	ldr	r5, =0xff0a81a8			@ PRRR
	ldr	r6, =0x40e040e0			@ NMRR
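	/*
	 * Worked example: for WRITEBACK, n = 0b011, so TR = PRRR[7:6]
	 * = 0b10 (normal memory), IR = NMRR[7:6] = 0b11 and
	 * OR = NMRR[23:22] = 0b11, matching the table above.
	 */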
	mcr	p15, 0, r5, c10, c2, 0		@ write PRRR
	mcr	p15, 0, r6, c10, c2, 1		@ write NMRR
#endif
	adr	r5, v7_crval
	ldmia	r5, {r5, r6}
#ifdef CONFIG_CPU_ENDIAN_BE8
	orr	r6, r6, #1 << 25		@ big-endian page tables
#endif
	mrc	p15, 0, r0, c1, c0, 0		@ read control register
	bic	r0, r0, r5			@ clear the bits we must clear
	orr	r0, r0, r6			@ set the bits we must set
 THUMB(	orr	r0, r0, #1 << 30	)	@ Thumb exceptions
	mov	pc, lr				@ return to head.S:__ret
ENDPROC(__v7_setup)

	/*   AT
	 *  TFR   EV X F   I D LR    S
	 * .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM
	 * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced
	 *    1    0 110       0011 1100 .111 1101 < we want
	 */
	.type	v7_crval, #object
v7_crval:
	crval	clear=0x0120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c

__v7_setup_stack:
	.space	4 * 11				@ 11 words; 10 registers are saved above

	__INITDATA

	.type	v7_processor_functions, #object
ENTRY(v7_processor_functions)
	.word	v7_early_abort
	.word	v7_pabort
	.word	cpu_v7_proc_init
	.word	cpu_v7_proc_fin
	.word	cpu_v7_reset
	.word	cpu_v7_do_idle
	.word	cpu_v7_dcache_clean_area
	.word	cpu_v7_switch_mm
	.word	cpu_v7_set_pte_ext
	.size	v7_processor_functions, . - v7_processor_functions

	.section ".rodata"

	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv7"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v7"
	.size	cpu_elf_name, . - cpu_elf_name
	.align

	.section ".proc.info.init", #alloc, #execinstr

	.type   __v7_ca9mp_proc_info, #object
__v7_ca9mp_proc_info:
	.long	0x410fc090		@ Required ID value
	.long	0xff0ffff0		@ Mask for ID
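	@ i.e. implementer 0x41 (ARM), part 0xc09: any Cortex-A9 revision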
	ALT_SMP(.long \
		PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ | \
		PMD_FLAGS_SMP)
	ALT_UP(.long \
		PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ | \
		PMD_FLAGS_UP)
	.long   PMD_TYPE_SECT | \
		PMD_SECT_XN | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__v7_ca9mp_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS
	.long	cpu_v7_name
	.long	v7_processor_functions
	.long	v7wbi_tlb_fns
	.long	v6_user_fns
	.long	v7_cache_fns
	.size	__v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info

	/*
	 * Match any ARMv7 processor core.
	 */
	.type	__v7_proc_info, #object
__v7_proc_info:
	.long	0x000f0000		@ Required ID value
	.long	0x000f0000		@ Mask for ID
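	@ only the architecture field is compared; 0xf (CPUID scheme)
	@ matches any ARMv7-class core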
	ALT_SMP(.long \
		PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ | \
		PMD_FLAGS_SMP)
	ALT_UP(.long \
		PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ | \
		PMD_FLAGS_UP)
	.long   PMD_TYPE_SECT | \
		PMD_SECT_XN | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__v7_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS
	.long	cpu_v7_name
	.long	v7_processor_functions
	.long	v7wbi_tlb_fns
	.long	v6_user_fns
	.long	v7_cache_fns
	.size	__v7_proc_info, . - __v7_proc_info