/*
 *  linux/arch/arm/boot/compressed/head.S
 *
 *  Copyright (C) 1996-2002 Russell King
 *  Copyright (C) 2004 Hyok S. Choi (MPU support)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>

/*
 * Debugging stuff
 *
 * Note that these macros must not contain any code which is not
 * 100% relocatable.  Any attempt to do so will result in a crash.
 * Please select one of the following when turning on debugging.
 */
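/*
 * Every variant below provides the same two macros: "loadsp" loads
 * whatever port base "writeb" needs into \rb (nothing at all for the
 * ICEDCC variants), and "writeb" emits the byte in \ch to that port.
 * The kputc and kphex helpers further down are built on top of them.
 */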
#ifdef DEBUG

#if defined(CONFIG_DEBUG_ICEDCC)

#ifdef CONFIG_CPU_V6
		.macro	loadsp, rb
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c0, c5, 0
		.endm
#elif defined(CONFIG_CPU_V7)
		.macro	loadsp, rb
		.endm
		.macro	writeb, ch, rb
wait:		mrc	p14, 0, pc, c0, c1, 0
		bcs	wait
		mcr	p14, 0, \ch, c0, c5, 0
		.endm
#elif defined(CONFIG_CPU_XSCALE)
		.macro	loadsp, rb
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c8, c0, 0
		.endm
#else
		.macro	loadsp, rb
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c1, c0, 0
		.endm
#endif

#else

#include <mach/debug-macro.S>

		.macro	writeb,	ch, rb
		senduart \ch, \rb
		.endm

#if defined(CONFIG_ARCH_SA1100)
		.macro	loadsp, rb
		mov	\rb, #0x80000000	@ physical base address
#ifdef CONFIG_DEBUG_LL_SER3
		add	\rb, \rb, #0x00050000	@ Ser3
#else
		add	\rb, \rb, #0x00010000	@ Ser1
#endif
		.endm
#elif defined(CONFIG_ARCH_S3C2410)
		.macro loadsp, rb
		mov	\rb, #0x50000000
		add	\rb, \rb, #0x4000 * CONFIG_S3C_LOWLEVEL_UART_PORT
		.endm
#else
		.macro	loadsp,	rb
		addruart \rb
		.endm
#endif
#endif
#endif

		.macro	kputc,val
		mov	r0, \val
		bl	putc
		.endm

		.macro	kphex,val,len
		mov	r0, \val
		mov	r1, #\len
		bl	phex
		.endm

		.macro	debug_reloc_start
#ifdef DEBUG
		kputc	#'\n'
		kphex	r6, 8		/* processor id */
		kputc	#':'
		kphex	r7, 8		/* architecture id */
#ifdef CONFIG_CPU_CP15
		kputc	#':'
		mrc	p15, 0, r0, c1, c0
		kphex	r0, 8		/* control reg */
#endif
		kputc	#'\n'
		kphex	r5, 8		/* decompressed kernel start */
		kputc	#'-'
		kphex	r9, 8		/* decompressed kernel end  */
		kputc	#'>'
		kphex	r4, 8		/* kernel execution address */
		kputc	#'\n'
#endif
		.endm

		.macro	debug_reloc_end
#ifdef DEBUG
		kphex	r5, 8		/* end of kernel */
		kputc	#'\n'
		mov	r0, r4
		bl	memdump		/* dump 256 bytes at start of kernel */
#endif
		.endm

		.section ".start", #alloc, #execinstr
/*
 * sort out different calling conventions
 */
		.align
start:
		.type	start,#function
		.rept	8
		mov	r0, r0
		.endr

		b	1f
		.word	0x016f2818		@ Magic numbers to help the loader
		.word	start			@ absolute load/run zImage address
		.word	_edata			@ zImage end address
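		@ With the eight NOPs and the branch above, these three
		@ words land at fixed offsets 0x24, 0x28 and 0x2c from
		@ "start", which is where boot loaders conventionally
		@ look for them.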
1:		mov	r7, r1			@ save architecture ID
		mov	r8, r2			@ save atags pointer

#ifndef __ARM_ARCH_2__
		/*
		 * Booting from Angel - need to enter SVC mode and disable
		 * FIQs/IRQs (numeric definitions from angel arm.h source).
		 * We only do this if we were in user mode on entry.
		 */
		mrs	r2, cpsr		@ get current mode
		tst	r2, #3			@ not user?
		bne	not_angel
		mov	r0, #0x17		@ angel_SWIreason_EnterSVC
 ARM(		swi	0x123456	)	@ angel_SWI_ARM
 THUMB(		svc	0xab		)	@ angel_SWI_THUMB
not_angel:
		mrs	r2, cpsr		@ turn off interrupts to
		orr	r2, r2, #0xc0		@ prevent angel from running
		msr	cpsr_c, r2
#else
		teqp	pc, #0x0c000003		@ turn off interrupts
#endif

		/*
		 * Note that some cache flushing and other stuff may
		 * be needed here - is there an Angel SWI call for this?
		 */

		/*
		 * some architecture specific code can be inserted
		 * by the linker here, but it should preserve r7, r8, and r9.
		 */

		.text
		adr	r0, LC0
 ARM(		ldmia	r0, {r1, r2, r3, r4, r5, r6, ip, sp}	)
 THUMB(		ldmia	r0, {r1, r2, r3, r4, r5, r6, ip}	)
 THUMB(		ldr	sp, [r0, #28]				)
		subs	r0, r0, r1		@ calculate the delta offset

						@ if delta is zero, we are
		beq	not_relocated		@ running at the address we
						@ were linked at.

		/*
		 * We're running at a different address.  We need to fix
		 * up various pointers:
		 *   r5 - zImage base address
		 *   r6 - GOT start
		 *   ip - GOT end
		 */
		add	r5, r5, r0
		add	r6, r6, r0
		add	ip, ip, r0

#ifndef CONFIG_ZBOOT_ROM
		/*
		 * If we're running fully PIC === CONFIG_ZBOOT_ROM = n,
		 * we need to fix up pointers into the BSS region.
		 *   r2 - BSS start
		 *   r3 - BSS end
		 *   sp - stack pointer
		 */
		add	r2, r2, r0
		add	r3, r3, r0
		add	sp, sp, r0

		/*
		 * Relocate all entries in the GOT table.
		 */
1:		ldr	r1, [r6, #0]		@ relocate entries in the GOT
		add	r1, r1, r0		@ table.  This fixes up the
		str	r1, [r6], #4		@ C references.
		cmp	r6, ip
		blo	1b
#else

		/*
		 * Relocate entries in the GOT table.  We only relocate
		 * the entries that are outside the (relocated) BSS region.
		 */
1:		ldr	r1, [r6, #0]		@ relocate entries in the GOT
		cmp	r1, r2			@ entry < bss_start ||
		cmphs	r3, r1			@ _end < entry
		addlo	r1, r1, r0		@ table.  This fixes up the
		str	r1, [r6], #4		@ C references.
		cmp	r6, ip
		blo	1b
#endif

not_relocated:	mov	r0, #0
1:		str	r0, [r2], #4		@ clear bss
		str	r0, [r2], #4
		str	r0, [r2], #4
		str	r0, [r2], #4
		cmp	r2, r3
		blo	1b

		/*
		 * The C runtime environment should now be setup
		 * sufficiently.  Turn the cache on, set up some
		 * pointers, and start decompressing.
		 */
		bl	cache_on

		mov	r1, sp			@ malloc space above stack
		add	r2, sp, #0x10000	@ 64k max

/*
 * Check to see if we will overwrite ourselves.
 *   r4 = final kernel address
 *   r5 = start of this image
 *   r2 = end of malloc space (and therefore this image)
 * We basically want:
 *   r4 >= r2 -> OK
 *   r4 + image length <= r5 -> OK
 */
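/*
 * The second test is only an estimate: r3 = sp - r5 is slightly more
 * than the compressed image size, and r0 = r4 + 4*r3 assumes the data
 * never grows by more than a factor of four when decompressed.
 */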
		cmp	r4, r2
		bhs	wont_overwrite
		sub	r3, sp, r5		@ > compressed kernel size
		add	r0, r4, r3, lsl #2	@ allow for 4x expansion
		cmp	r0, r5
		bls	wont_overwrite

		mov	r5, r2			@ decompress after malloc space
		mov	r0, r5
		mov	r3, r7
		bl	decompress_kernel

		add	r0, r0, #127 + 128	@ alignment + stack
		bic	r0, r0, #127		@ align the kernel length
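		@ r0 is now the decompressed length rounded up to a
		@ 128-byte boundary plus a further 128 bytes, matching
		@ the "alignment + stack" reservation above.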
/*
 * r0     = decompressed kernel length
 * r1-r3  = unused
 * r4     = kernel execution address
 * r5     = decompressed kernel start
 * r6     = processor ID
 * r7     = architecture ID
 * r8     = atags pointer
 * r9-r12,r14 = corrupted
 */
		add	r1, r5, r0		@ end of decompressed kernel
		adr	r2, reloc_start
		ldr	r3, LC1
		add	r3, r2, r3
1:		ldmia	r2!, {r9 - r12, r14}	@ copy relocation code
		stmia	r1!, {r9 - r12, r14}
		ldmia	r2!, {r9 - r12, r14}
		stmia	r1!, {r9 - r12, r14}
		cmp	r2, r3
		blo	1b
		mov	sp, r1
		add	sp, sp, #128		@ relocate the stack

		bl	cache_clean_flush
 ARM(		add	pc, r5, r0		) @ call relocation code
 THUMB(		add	r12, r5, r0		)
 THUMB(		mov	pc, r12			) @ call relocation code

/*
 * We're not in danger of overwriting ourselves.  Do this the simple way.
 *
 * r4     = kernel execution address
 * r7     = architecture ID
 */
wont_overwrite:	mov	r0, r4
		mov	r3, r7
		bl	decompress_kernel
		b	call_kernel

		.align	2
		.type	LC0, #object
LC0:		.word	LC0			@ r1
		.word	__bss_start		@ r2
		.word	_end			@ r3
		.word	zreladdr		@ r4
		.word	_start			@ r5
		.word	_got_start		@ r6
		.word	_got_end		@ ip
		.word	user_stack+4096		@ sp
LC1:		.word	reloc_end - reloc_start
		.size	LC0, . - LC0

#ifdef CONFIG_ARCH_RPC
		.globl	params
params:		ldr	r0, =params_phys
		mov	pc, lr
		.ltorg
		.align
#endif

/*
 * Turn on the cache.  We need to setup some page tables so that we
 * can have both the I and D caches on.
 *
 * We place the page tables 16k down from the kernel execution address,
 * and we hope that nothing else is using it.  If we're using it, we
 * will go pop!
 *
 * On entry,
 *  r4 = kernel execution address
 *  r6 = processor ID
 *  r7 = architecture number
 *  r8 = atags pointer
 *  r9 = run-time address of "start"  (???)
 * On exit,
 *  r1, r2, r3, r9, r10, r12 corrupted
 * This routine must preserve:
 *  r4, r5, r6, r7, r8
 */
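/*
 * 16k is the size of a full first-level page table: 4096 entries of
 * four bytes, each mapping one 1MB section of the 4GB address space.
 */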
		.align	5
cache_on:	mov	r3, #8			@ cache_on function
		b	call_cache_fn

/*
 * Initialize the highest priority protection region, PR7
 * to cover all 32bit address and cacheable and bufferable.
 */
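/*
 * The 0x3f written below is the region enable bit plus a size field of
 * 0b11111, i.e. a single protection region spanning the full 4GB.
 */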
__armv4_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting
		mcr 	p15, 0, r0, c6, c7, 1

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ D-cache on
		mcr	p15, 0, r0, c2, c0, 1	@ I-cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 1	@ I-access permission
		mcr	p15, 0, r0, c5, c0, 0	@ D-access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ ...I .... ..D. WC.M
		orr	r0, r0, #0x002d		@ .... .... ..1. 11.1
		orr	r0, r0, #0x1000		@ ...1 .... .... ....

		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mov	pc, lr

__armv3_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 0	@ access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ .... .... .... WC.M
		orr	r0, r0, #0x000d		@ .... .... .... 11.1
		mov	r0, #0
		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

__setup_mmu:	sub	r3, r4, #16384		@ Page directory size
		bic	r3, r3, #0xff		@ Align the pointer
		bic	r3, r3, #0x3f00
/*
 * Initialise the page tables, turning on the cacheable and bufferable
 * bits for the RAM area only.
 */
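/*
 * Each entry written below is a 1MB section descriptor: 0x12 marks it
 * as a section, 3 << 10 grants full access permissions, and bits 2-3
 * (0x0c) are the bufferable/cacheable bits toggled for the RAM range.
 */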
		mov	r0, r3
		mov	r9, r0, lsr #18
		mov	r9, r9, lsl #18		@ start of RAM
		add	r10, r9, #0x10000000	@ a reasonable RAM size
		mov	r1, #0x12
		orr	r1, r1, #3 << 10
		add	r2, r3, #16384
1:		cmp	r1, r9			@ if virt > start of RAM
		orrhs	r1, r1, #0x0c		@ set cacheable, bufferable
		cmp	r1, r10			@ if virt > end of RAM
		bichs	r1, r1, #0x0c		@ clear cacheable, bufferable
		str	r1, [r0], #4		@ 1:1 mapping
		add	r1, r1, #1048576
		teq	r0, r2
		bne	1b
/*
 * If ever we are running from Flash, then we surely want the cache
 * to be enabled also for our execution instance...  We map 2MB of it
 * so there is no map overlap problem for up to 1 MB compressed kernel.
 * If the execution is in RAM then we would only be duplicating the above.
 */
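/*
 * r2 = pc >> 20 is the index of the 1MB section we are executing from;
 * its descriptor and that of the following section are written at
 * r3 + 4 * index, giving the 2MB cacheable window described above.
 */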
		mov	r1, #0x1e
		orr	r1, r1, #3 << 10
		mov	r2, pc, lsr #20
		orr	r1, r1, r2, lsl #20
		add	r0, r3, r2, lsl #2
		str	r1, [r0], #4
		add	r1, r1, #1048576
		str	r1, [r0]
		mov	pc, lr
ENDPROC(__setup_mmu)

__armv4_mmu_cache_on:
		mov	r12, lr
#ifdef CONFIG_MMU
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x0030
#ifdef CONFIG_CPU_ENDIAN_BE8
		orr	r0, r0, #1 << 25	@ big-endian page tables
#endif
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
#endif
		mov	pc, r12

__armv7_mmu_cache_on:
		mov	r12, lr
#ifdef CONFIG_MMU
		mrc	p15, 0, r11, c0, c1, 4	@ read ID_MMFR0
		tst	r11, #0xf		@ VMSA
		blne	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		tst	r11, #0xf		@ VMSA
		mcrne	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
#endif
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x003c		@ write buffer
#ifdef CONFIG_MMU
#ifdef CONFIG_CPU_ENDIAN_BE8
		orr	r0, r0, #1 << 25	@ big-endian page tables
#endif
		orrne	r0, r0, #1		@ MMU enabled
		movne	r1, #-1
		mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
#endif
		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back
		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mov	pc, r12

__fa526_cache_on:
		mov	r12, lr
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c7, 0	@ Invalidate whole cache
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x1000		@ I-cache enable
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
		mov	pc, r12

__arm6_mmu_cache_on:
		mov	r12, lr
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mcr	p15, 0, r0, c5, c0, 0	@ invalidate whole TLB v3
		mov	r0, #0x30
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c5, c0, 0	@ invalidate whole TLB v3
		mov	pc, r12

__common_mmu_cache_on:
#ifndef CONFIG_THUMB2_KERNEL
#ifndef DEBUG
		orr	r0, r0, #0x000d		@ Write buffer, mmu
#endif
		mov	r1, #-1
		mcr	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcr	p15, 0, r1, c3, c0, 0	@ load domain access control
		b	1f
		.align	5			@ cache line aligned
1:		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back to
		sub	pc, lr, r0, lsr #32	@ properly flush pipeline
#endif

/*
 * All code following this line is relocatable.  It is relocated by
 * the above code to the end of the decompressed kernel image and
 * executed there.  During this time, we have no stacks.
 *
 * r0     = decompressed kernel length
 * r1-r3  = unused
 * r4     = kernel execution address
 * r5     = decompressed kernel start
 * r6     = processor ID
 * r7     = architecture ID
 * r8     = atags pointer
 * r9-r12,r14 = corrupted
 */
		.align	5
reloc_start:	add	r9, r5, r0
		sub	r9, r9, #128		@ do not copy the stack
		debug_reloc_start
		mov	r1, r4
1:
		.rept	4
		ldmia	r5!, {r0, r2, r3, r10 - r12, r14}	@ relocate kernel
		stmia	r1!, {r0, r2, r3, r10 - r12, r14}
		.endr

		cmp	r5, r9
		blo	1b
		mov	sp, r1
		add	sp, sp, #128		@ relocate the stack
		debug_reloc_end

call_kernel:	bl	cache_clean_flush
		bl	cache_off
		mov	r0, #0			@ must be zero
		mov	r1, r7			@ restore architecture number
		mov	r2, r8			@ restore atags pointer
		mov	pc, r4			@ call kernel

/*
 * Here follow the relocatable cache support functions for the
 * various processors.  This is a generic hook for locating an
 * entry and jumping to an instruction at the specified offset
 * from the start of the block.  Please note this is all position
 * independent code.
 *
 *  r1  = corrupted
 *  r2  = corrupted
 *  r3  = block offset
 *  r6  = corrupted
 *  r12 = corrupted
 */
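/*
 * r3 is set by the callers: 8 for cache_on, 12 for cache_off and 16
 * for cache_clean_flush, i.e. the offset of the wanted branch within
 * each five-word proc_types entry below.
 */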

call_cache_fn:	adr	r12, proc_types
#ifdef CONFIG_CPU_CP15
		mrc	p15, 0, r6, c0, c0	@ get processor ID
#else
		ldr	r6, =CONFIG_PROCESSOR_ID
#endif
1:		ldr	r1, [r12, #0]		@ get value
		ldr	r2, [r12, #4]		@ get mask
		eor	r1, r1, r6		@ (real ^ match)
		tst	r1, r2			@       & mask
 ARM(		addeq	pc, r12, r3		) @ call cache function
 THUMB(		addeq	r12, r3			)
 THUMB(		moveq	pc, r12			) @ call cache function
		add	r12, r12, #4*5
		b	1b

/*
 * Table for cache operations.  This is basically:
 *   - CPU ID match
 *   - CPU ID mask
 *   - 'cache on' method instruction
 *   - 'cache off' method instruction
 *   - 'cache flush' method instruction
 *
 * We match an entry using: ((real_id ^ match) & mask) == 0
 *
 * Writethrough caches generally only need 'on' and 'off'
 * methods.  Writeback caches _must_ have the flush method
 * defined.
 */
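/*
 * For example, an SA1100 reports an ID of the form 0x4401a11x; against
 * the sa110/sa1100 entry, (0x4401a11x ^ 0x4401a100) & 0xffffffe0 == 0,
 * so its ARMv4 MMU cache handlers are selected.
 */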
		.align	2
		.type	proc_types,#object
proc_types:
		.word	0x41560600		@ ARM6/610
		.word	0xffffffe0
		W(b)	__arm6_mmu_cache_off	@ works, but slow
		W(b)	__arm6_mmu_cache_off
		mov	pc, lr
 THUMB(		nop				)
@		b	__arm6_mmu_cache_on		@ untested
@		b	__arm6_mmu_cache_off
@		b	__armv3_mmu_cache_flush

		.word	0x00000000		@ old ARM ID
		.word	0x0000f000
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41007000		@ ARM7/710
		.word	0xfff8fe00
		W(b)	__arm7_mmu_cache_off
		W(b)	__arm7_mmu_cache_off
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41807200		@ ARM720T (writethrough)
		.word	0xffffff00
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41007400		@ ARM74x
		.word	0xff00ff00
		W(b)	__armv3_mpu_cache_on
		W(b)	__armv3_mpu_cache_off
		W(b)	__armv3_mpu_cache_flush
		
		.word	0x41009400		@ ARM94x
		.word	0xff00ff00
		W(b)	__armv4_mpu_cache_on
		W(b)	__armv4_mpu_cache_off
		W(b)	__armv4_mpu_cache_flush

		.word	0x00007000		@ ARM7 IDs
		.word	0x0000f000
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		@ Everything from here on will be the new ID system.

		.word	0x4401a100		@ sa110 / sa1100
		.word	0xffffffe0
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x6901b110		@ sa1110
		.word	0xfffffff0
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x56056930
		.word	0xff0ffff0		@ PXA935
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x56158000		@ PXA168
		.word	0xfffff000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x56056930
		.word	0xff0ffff0		@ PXA935
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x56050000		@ Feroceon
		.word	0xff0f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

#ifdef CONFIG_CPU_FEROCEON_OLD_ID
		/* this conflicts with the standard ARMv5TE entry */
		.long	0x41009260		@ Old Feroceon
		.long	0xff00fff0
		b	__armv4_mmu_cache_on
		b	__armv4_mmu_cache_off
		b	__armv5tej_mmu_cache_flush
#endif

		.word	0x66015261		@ FA526
		.word	0xff01fff1
		W(b)	__fa526_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__fa526_cache_flush

		@ These match on the architecture ID

		.word	0x00020000		@ ARMv4T
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x00050000		@ ARMv5TE
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x00060000		@ ARMv5TEJ
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x0007b000		@ ARMv6
		.word	0x000ff000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv6_mmu_cache_flush

		.word	0x560f5810		@ Marvell PJ4 ARMv6
		.word	0xff0ffff0
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv6_mmu_cache_flush

		.word	0x000f0000		@ new CPU Id
		.word	0x000f0000
		W(b)	__armv7_mmu_cache_on
		W(b)	__armv7_mmu_cache_off
		W(b)	__armv7_mmu_cache_flush

		.word	0			@ unrecognised type
		.word	0
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.size	proc_types, . - proc_types

/*
 * Turn off the Cache and MMU.  ARMv3 does not support
 * reading the control register, but ARMv4 does.
 *
 * On entry,  r6 = processor ID
 * On exit,   r0, r1, r2, r3, r12 corrupted
 * This routine must preserve: r4, r6, r7
 */
		.align	5
cache_off:	mov	r3, #12			@ cache_off function
		b	call_cache_fn

__armv4_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0	@ turn MPU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c6, 0	@ flush D-Cache
		mcr	p15, 0, r0, c7, c5, 0	@ flush I-Cache
		mov	pc, lr

__armv3_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0, 0	@ turn MPU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

__armv4_mmu_cache_off:
#ifdef CONFIG_MMU
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c7	@ invalidate whole cache v4
		mcr	p15, 0, r0, c8, c7	@ invalidate whole TLB v4
#endif
		mov	pc, lr

__armv7_mmu_cache_off:
		mrc	p15, 0, r0, c1, c0
#ifdef CONFIG_MMU
		bic	r0, r0, #0x000d
#else
		bic	r0, r0, #0x000c
#endif
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		mov	r12, lr
		bl	__armv7_mmu_cache_flush
		mov	r0, #0
#ifdef CONFIG_MMU
		mcr	p15, 0, r0, c8, c7, 0	@ invalidate whole TLB
#endif
		mcr	p15, 0, r0, c7, c5, 6	@ invalidate BTC
		mcr	p15, 0, r0, c7, c10, 4	@ DSB
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mov	pc, r12

__arm6_mmu_cache_off:
		mov	r0, #0x00000030		@ ARM6 control reg.
		b	__armv3_mmu_cache_off

__arm7_mmu_cache_off:
		mov	r0, #0x00000070		@ ARM7 control reg.
		b	__armv3_mmu_cache_off

__armv3_mmu_cache_off:
		mcr	p15, 0, r0, c1, c0, 0	@ turn MMU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mcr	p15, 0, r0, c5, c0, 0	@ invalidate whole TLB v3
		mov	pc, lr

/*
 * Clean and flush the cache to maintain consistency.
 *
 * On entry,
 *  r6 = processor ID
 * On exit,
 *  r1, r2, r3, r11, r12 corrupted
 * This routine must preserve:
 *  r0, r4, r5, r6, r7
 */
		.align	5
cache_clean_flush:
		mov	r3, #16
		b	call_cache_fn

__armv4_mpu_cache_flush:
		mov	r2, #1
		mov	r3, #0
		mcr	p15, 0, ip, c7, c6, 0	@ invalidate D cache
		mov	r1, #7 << 5		@ 8 segments
1:		orr	r3, r1, #63 << 26	@ 64 entries
2:		mcr	p15, 0, r3, c7, c14, 2	@ clean & invalidate D index
		subs	r3, r3, #1 << 26
		bcs	2b			@ entries 63 to 0
		subs 	r1, r1, #1 << 5
		bcs	1b			@ segments 7 to 0

		teq	r2, #0
		mcrne	p15, 0, ip, c7, c5, 0	@ invalidate I cache
		mcr	p15, 0, ip, c7, c10, 4	@ drain WB
		mov	pc, lr
		
__fa526_cache_flush:
		mov	r1, #0
		mcr	p15, 0, r1, c7, c14, 0	@ clean and invalidate D cache
		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv6_mmu_cache_flush:
		mov	r1, #0
		mcr	p15, 0, r1, c7, c14, 0	@ clean+invalidate D
		mcr	p15, 0, r1, c7, c5, 0	@ invalidate I+BTB
		mcr	p15, 0, r1, c7, c15, 0	@ clean+invalidate unified
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv7_mmu_cache_flush:
		mrc	p15, 0, r10, c0, c1, 5	@ read ID_MMFR1
		tst	r10, #0xf << 16		@ hierarchical cache (ARMv7)
		mov	r10, #0
		beq	hierarchical
		mcr	p15, 0, r10, c7, c14, 0	@ clean+invalidate D
		b	iflush
hierarchical:
		mcr	p15, 0, r10, c7, c10, 5	@ DMB
		stmfd	sp!, {r0-r7, r9-r11}
		mrc	p15, 1, r0, c0, c0, 1	@ read clidr
		ands	r3, r0, #0x7000000	@ extract loc from clidr
		mov	r3, r3, lsr #23		@ left align loc bit field
		beq	finished		@ if loc is 0, then no need to clean
		mov	r10, #0			@ start clean at cache level 0
loop1:
		add	r2, r10, r10, lsr #1	@ work out 3x current cache level
		mov	r1, r0, lsr r2		@ extract cache type bits from clidr
		and	r1, r1, #7		@ mask of the bits for current cache only
		cmp	r1, #2			@ see what cache we have at this level
		blt	skip			@ skip if no cache, or just i-cache
		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
		mcr	p15, 0, r10, c7, c5, 4	@ isb to sync the new cssr&csidr
		mrc	p15, 1, r1, c0, c0, 0	@ read the new csidr
		and	r2, r1, #7		@ extract the length of the cache lines
		add	r2, r2, #4		@ add 4 (line length offset)
		ldr	r4, =0x3ff
		ands	r4, r4, r1, lsr #3	@ find maximum number on the way size
		clz	r5, r4			@ find bit position of way size increment
		ldr	r7, =0x7fff
		ands	r7, r7, r1, lsr #13	@ extract max number of the index size
loop2:
		mov	r9, r4			@ create working copy of max way size
loop3:
 ARM(		orr	r11, r10, r9, lsl r5	) @ factor way and cache number into r11
 ARM(		orr	r11, r11, r7, lsl r2	) @ factor index number into r11
 THUMB(		lsl	r6, r9, r5		)
 THUMB(		orr	r11, r10, r6		) @ factor way and cache number into r11
 THUMB(		lsl	r6, r7, r2		)
 THUMB(		orr	r11, r11, r6		) @ factor index number into r11
		mcr	p15, 0, r11, c7, c14, 2	@ clean & invalidate by set/way
		subs	r9, r9, #1		@ decrement the way
		bge	loop3
		subs	r7, r7, #1		@ decrement the index
		bge	loop2
skip:
		add	r10, r10, #2		@ increment cache number
		cmp	r3, r10
		bgt	loop1
finished:
		ldmfd	sp!, {r0-r7, r9-r11}
		mov	r10, #0			@ switch back to cache level 0
		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
iflush:
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 0	@ invalidate I+BTB
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 4	@ ISB
		mov	pc, lr

__armv5tej_mmu_cache_flush:
1:		mrc	p15, 0, r15, c7, c14, 3	@ test,clean,invalidate D cache
		bne	1b
		mcr	p15, 0, r0, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r0, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv4_mmu_cache_flush:
		mov	r2, #64*1024		@ default: 32K dcache size (*2)
		mov	r11, #32		@ default: 32 byte line size
		mrc	p15, 0, r3, c0, c0, 1	@ read cache type
		teq	r3, r6			@ cache ID register present?
		beq	no_cache_id
		mov	r1, r3, lsr #18
		and	r1, r1, #7
		mov	r2, #1024
		mov	r2, r2, lsl r1		@ base dcache size *2
		tst	r3, #1 << 14		@ test M bit
		addne	r2, r2, r2, lsr #1	@ +1/2 size if M == 1
		mov	r3, r3, lsr #12
		and	r3, r3, #3
		mov	r11, #8
		mov	r11, r11, lsl r3	@ cache line size in bytes
no_cache_id:
		mov	r1, pc
		bic	r1, r1, #63		@ align to longest cache line
		add	r2, r1, r2
1:
 ARM(		ldr	r3, [r1], r11		) @ s/w flush D cache
 THUMB(		ldr     r3, [r1]		) @ s/w flush D cache
 THUMB(		add     r1, r1, r11		)
		teq	r1, r2
		bne	1b

		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c6, 0	@ flush D cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv3_mmu_cache_flush:
__armv3_mpu_cache_flush:
		mov	r1, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

/*
 * Various debugging routines for printing hex characters and
 * memory, which again must be relocatable.
 */
#ifdef DEBUG
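/*
 * puts/putc/phex/memdump all print through the loadsp/writeb macros
 * defined at the top of this file, so their output goes to whichever
 * debug channel (ICEDCC or platform UART) was selected there.
 */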
		.align	2
		.type	phexbuf,#object
phexbuf:	.space	12
		.size	phexbuf, . - phexbuf

phex:		adr	r3, phexbuf
		mov	r2, #0
		strb	r2, [r3, r1]
1:		subs	r1, r1, #1
		movmi	r0, r3
		bmi	puts
		and	r2, r0, #15
		mov	r0, r0, lsr #4
		cmp	r2, #10
		addge	r2, r2, #7
		add	r2, r2, #'0'
		strb	r2, [r3, r1]
		b	1b

puts:		loadsp	r3
1:		ldrb	r2, [r0], #1
		teq	r2, #0
		moveq	pc, lr
2:		writeb	r2, r3
		mov	r1, #0x00020000
3:		subs	r1, r1, #1
		bne	3b
		teq	r2, #'\n'
		moveq	r2, #'\r'
		beq	2b
		teq	r0, #0
		bne	1b
		mov	pc, lr
putc:
		mov	r2, r0
		mov	r0, #0
		loadsp	r3
		b	2b

memdump:	mov	r12, r0
		mov	r10, lr
		mov	r11, #0
2:		mov	r0, r11, lsl #2
		add	r0, r0, r12
		mov	r1, #8
		bl	phex
		mov	r0, #':'
		bl	putc
1:		mov	r0, #' '
		bl	putc
		ldr	r0, [r12, r11, lsl #2]
		mov	r1, #8
		bl	phex
		and	r0, r11, #7
		teq	r0, #3
		moveq	r0, #' '
		bleq	putc
		and	r0, r11, #7
		add	r11, r11, #1
		teq	r0, #7
		bne	1b
		mov	r0, #'\n'
		bl	putc
		cmp	r11, #64
		blt	2b
		mov	pc, r10
#endif

		.ltorg
reloc_end:

		.align
		.section ".stack", "w"
user_stack:	.space	4096