/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 *  Note:  there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 *  that causes it to save wrong values...  Be aware!
 */

#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#ifndef CONFIG_MULTI_IRQ_HANDLER
#include <mach/entry-macro.S>
#endif
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>
#include <asm/system_info.h>

#include "entry-header.S"
#include <asm/entry-macro-multi.S>

/*
 * Interrupt handling.
 *
 * With CONFIG_MULTI_IRQ_HANDLER the handler is fetched at run time from
 * the handle_arch_irq pointer; otherwise the platform's static
 * arch_irq_handler_default macro is expanded inline.  Either way the
 * handler is entered with r0 = pt_regs and returns to the 9997 label.
 */
	.macro	irq_handler
#ifdef CONFIG_MULTI_IRQ_HANDLER
	ldr	r1, =handle_arch_irq
	mov	r0, sp			@ r0 = pt_regs for the handler
	adr	lr, BSYM(9997f)		@ return here after the handler
	ldr	pc, [r1]
#else
	arch_irq_handler_default
#endif
9997:
	.endm

50
	.macro	pabt_helper
51
	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
52
#ifdef MULTI_PABORT
53
	ldr	ip, .LCprocfns
54
	mov	lr, pc
55
	ldr	pc, [ip, #PROCESSOR_PABT_FUNC]
56 57 58 59 60 61 62 63 64 65
#else
	bl	CPU_PABORT_HANDLER
#endif
	.endm

	.macro	dabt_helper

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - pt_regs
	@  r4 - aborted context pc
	@  r5 - aborted context psr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
#ifdef MULTI_DABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_DABT_FUNC]
#else
	bl	CPU_DABORT_HANDLER
#endif
	.endm

@ Place the exception entry code in .kprobes.text when kprobes is enabled
@ so that kprobes itself never probes these routines.
#ifdef CONFIG_KPROBES
	.section	.kprobes.text,"ax",%progbits
#else
	.text
#endif

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - lr}		)
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
 THUMB(	str	lr, [sp, #S_LR]		)
	mov	r1, #\reason
	.endm

__pabt_invalid:
	inv_entry BAD_PREFETCH
	b	common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
	inv_entry BAD_DATA
	b	common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
	inv_entry BAD_IRQ
	b	common_invalid
ENDPROC(__irq_invalid)

__und_invalid:
	inv_entry BAD_UNDEFINSTR

	@
	@ XXX fall through to common_invalid
	@

@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
common_invalid:
	zero_fp

	ldmia	r0, {r4 - r6}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"

	mov	r0, sp
	b	bad_mode
ENDPROC(__und_invalid)

/*
 * SVC mode handlers
 */

@ SPFIX() expands its argument only when the EABI 8-byte stack alignment
@ fixup is required (AEABI on >= ARMv5); otherwise it expands to nothing.
#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif

149
	.macro	svc_entry, stack_hole=0
150 151
 UNWIND(.fnstart		)
 UNWIND(.save {r0 - pc}		)
152 153 154 155 156 157 158
	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 SPFIX(	mov	r0, sp		)
 SPFIX(	tst	r0, #4		)	@ test original stack alignment
 SPFIX(	ldr	r0, [sp]	)	@ restored
#else
159
 SPFIX(	tst	sp, #4		)
160 161 162
#endif
 SPFIX(	subeq	sp, sp, #4	)
	stmia	sp, {r1 - r12}
R
Russell King 已提交
163

164 165 166 167 168 169
	ldmia	r0, {r3 - r5}
	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""      ""       ""
	add	r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
 SPFIX(	addeq	r2, r2, #4	)
	str	r3, [sp, #-4]!		@ save the "real" r0 copied
R
Russell King 已提交
170 171
					@ from the exception stack

172
	mov	r3, lr
L
Linus Torvalds 已提交
173 174 175 176

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
177 178 179 180 181
	@  r2 - sp_svc
	@  r3 - lr_svc
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
L
Linus Torvalds 已提交
182
	@
183
	stmia	r7, {r2 - r6}
L
Linus Torvalds 已提交
184

185 186 187
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
188
	.endm
L
Linus Torvalds 已提交
189

190 191 192
	.align	5
__dabt_svc:
	svc_entry
L
Linus Torvalds 已提交
193
	mov	r2, sp
194
	dabt_helper
L
Linus Torvalds 已提交
195 196 197 198

	@
	@ IRQs off again before pulling preserved data off the stack
	@
199
	disable_irq_notrace
L
Linus Torvalds 已提交
200

201 202 203 204 205 206
#ifdef CONFIG_TRACE_IRQFLAGS
	tst	r5, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	r5, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
207
	svc_exit r5				@ return from exception
208
 UNWIND(.fnend		)
209
ENDPROC(__dabt_svc)
L
Linus Torvalds 已提交
210 211 212

	.align	5
__irq_svc:
R
Russell King 已提交
213
	svc_entry
214
	irq_handler
215

L
Linus Torvalds 已提交
216
#ifdef CONFIG_PREEMPT
217 218
	get_thread_info tsk
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
219
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
R
Russell King 已提交
220 221
	teq	r8, #0				@ if preempt count != 0
	movne	r0, #0				@ force flags to 0
L
Linus Torvalds 已提交
222 223 224
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
#endif
225

R
Russell King 已提交
226
#ifdef CONFIG_TRACE_IRQFLAGS
227 228 229
	@ The parent context IRQs must have been enabled to get here in
	@ the first place, so there's no point checking the PSR I bit.
	bl	trace_hardirqs_on
R
Russell King 已提交
230
#endif
231
	svc_exit r5				@ return from exception
232
 UNWIND(.fnend		)
233
ENDPROC(__irq_svc)
L
Linus Torvalds 已提交
234 235 236 237 238

	.ltorg

#ifdef CONFIG_PREEMPT
svc_preempt:
R
Russell King 已提交
239
	mov	r8, lr
L
Linus Torvalds 已提交
240
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
241
	ldr	r0, [tsk, #TI_FLAGS]		@ get new tasks TI_FLAGS
L
Linus Torvalds 已提交
242
	tst	r0, #_TIF_NEED_RESCHED
R
Russell King 已提交
243
	moveq	pc, r8				@ go again
L
Linus Torvalds 已提交
244 245 246 247 248
	b	1b
#endif

	.align	5
__und_svc:
#ifdef CONFIG_KPROBES
	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
	@ it obviously needs free stack space which then will belong to
	@ the saved context.
	svc_entry 64
#else
	svc_entry
#endif
	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	@  r0 - instruction
	@
#ifndef	CONFIG_THUMB2_KERNEL
	ldr	r0, [r4, #-4]
#else
	ldrh	r0, [r4, #-2]			@ Thumb instruction at LR - 2
	cmp	r0, #0xe800			@ 32-bit instruction if xx >= 0
	ldrhhs	r9, [r4]			@ bottom 16 bits
	orrhs	r0, r9, r0, lsl #16
#endif
	adr	r9, BSYM(1f)
	mov	r2, r4
	bl	call_fpe

	mov	r0, sp				@ struct pt_regs *regs
	bl	do_undefinstr

	@
	@ IRQs off again before pulling preserved data off the stack
	@
1:	disable_irq_notrace

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
#ifdef CONFIG_TRACE_IRQFLAGS
	tst	r5, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	r5, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__und_svc)
L
Linus Torvalds 已提交
297 298 299

	.align	5
__pabt_svc:
R
Russell King 已提交
300
	svc_entry
301
	mov	r2, sp				@ regs
302
	pabt_helper
L
Linus Torvalds 已提交
303 304 305 306

	@
	@ IRQs off again before pulling preserved data off the stack
	@
307
	disable_irq_notrace
L
Linus Torvalds 已提交
308

309 310 311 312 313 314
#ifdef CONFIG_TRACE_IRQFLAGS
	tst	r5, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	r5, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
315
	svc_exit r5				@ return from exception
316
 UNWIND(.fnend		)
317
ENDPROC(__pabt_svc)
L
Linus Torvalds 已提交
318 319

	.align	5
320 321
.LCcralign:
	.word	cr_alignment
P
Paul Brook 已提交
322
#ifdef MULTI_DABORT
L
Linus Torvalds 已提交
323 324 325 326 327 328 329 330
.LCprocfns:
	.word	processor
#endif
.LCfp:
	.word	fp_enter

/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so should S_FRAME_SIZE
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

R
Russell King 已提交
339
	.macro	usr_entry
340 341
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)	@ don't unwind the user space
R
Russell King 已提交
342
	sub	sp, sp, #S_FRAME_SIZE
343 344
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)
R
Russell King 已提交
345

346
	ldmia	r0, {r3 - r5}
R
Russell King 已提交
347
	add	r0, sp, #S_PC		@ here for interlock avoidance
348
	mov	r6, #-1			@  ""  ""     ""        ""
R
Russell King 已提交
349

350
	str	r3, [sp]		@ save the "real" r0 copied
R
Russell King 已提交
351
					@ from the exception stack
L
Linus Torvalds 已提交
352 353 354 355

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
356 357 358
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
L
Linus Torvalds 已提交
359 360 361
	@
	@ Also, separately save sp_usr and lr_usr
	@
362
	stmia	r0, {r4 - r6}
363 364
 ARM(	stmdb	r0, {sp, lr}^			)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)
L
Linus Torvalds 已提交
365 366 367 368

	@
	@ Enable the alignment trap while in kernel mode
	@
369
	alignment_trap r0
L
Linus Torvalds 已提交
370 371 372 373 374

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp
375 376 377 378

#ifdef CONFIG_IRQSOFF_TRACER
	bl	trace_hardirqs_off
#endif
L
Linus Torvalds 已提交
379 380
	.endm

381
	.macro	kuser_cmpxchg_check
382
#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
383 384 385 386 387 388 389
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
	@ Make sure our user space atomic helper is restarted
	@ if it was interrupted in a critical region.  Here we
	@ perform a quick test inline since it should be false
	@ 99.9999% of the time.  The rest is done out of line.
390
	cmp	r4, #TASK_SIZE
391
	blhs	kuser_cmpxchg64_fixup
392 393 394 395
#endif
#endif
	.endm

L
Linus Torvalds 已提交
396 397
	.align	5
__dabt_usr:
R
Russell King 已提交
398
	usr_entry
399
	kuser_cmpxchg_check
L
Linus Torvalds 已提交
400
	mov	r2, sp
401 402
	dabt_helper
	b	ret_from_exception
403
 UNWIND(.fnend		)
404
ENDPROC(__dabt_usr)
L
Linus Torvalds 已提交
405 406 407

	.align	5
__irq_usr:
R
Russell King 已提交
408
	usr_entry
409
	kuser_cmpxchg_check
410
	irq_handler
411
	get_thread_info tsk
L
Linus Torvalds 已提交
412
	mov	why, #0
413
	b	ret_to_user_from_irq
414
 UNWIND(.fnend		)
415
ENDPROC(__irq_usr)
L
Linus Torvalds 已提交
416 417 418 419 420

	.ltorg

	.align	5
__und_usr:
R
Russell King 已提交
421
	usr_entry
422

423 424
	mov	r2, r4
	mov	r3, r5
L
Linus Torvalds 已提交
425 426 427 428 429 430 431 432

	@
	@ fall through to the emulation code, which returns using r9 if
	@ it has emulated the instruction, or the more conventional lr
	@ if we are to treat this as a real undefined instruction
	@
	@  r0 - instruction
	@
433 434
	adr	r9, BSYM(ret_from_exception)
	adr	lr, BSYM(__und_usr_unknown)
435
	tst	r3, #PSR_T_BIT			@ Thumb mode?
436
	itet	eq				@ explicit IT needed for the 1f label
437 438 439
	subeq	r4, r2, #4			@ ARM instr at LR - 4
	subne	r4, r2, #2			@ Thumb instr at LR - 2
1:	ldreqt	r0, [r4]
440 441 442
#ifdef CONFIG_CPU_ENDIAN_BE8
	reveq	r0, r0				@ little endian instruction
#endif
443 444
	beq	call_fpe
	@ Thumb instruction
445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
/*
 * Thumb-2 instruction handling.  Note that because pre-v6 and >= v6 platforms
 * can never be supported in a single kernel, this code is not applicable at
 * all when __LINUX_ARM_ARCH__ < 6.  This allows simplifying assumptions to be
 * made about .arch directives.
 */
#if __LINUX_ARM_ARCH__ < 7
/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
#define NEED_CPU_ARCHITECTURE
	ldr	r5, .LCcpu_architecture
	ldr	r5, [r5]
	cmp	r5, #CPU_ARCH_ARMv7
	blo	__und_usr_unknown
/*
 * The following code won't get run unless the running CPU really is v7, so
 * coding round the lack of ldrht on older arches is pointless.  Temporarily
 * override the assembler target arch with the minimum required instead:
 */
	.arch	armv6t2
#endif
466 467 468 469
2:
 ARM(	ldrht	r5, [r4], #2	)
 THUMB(	ldrht	r5, [r4]	)
 THUMB(	add	r4, r4, #2	)
470
	cmp	r5, #0xe800			@ 32bit instruction if xx != 0
471 472 473 474
	blo	__und_usr_unknown
3:	ldrht	r0, [r4]
	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
	orr	r0, r0, r5, lsl #16
475 476 477 478 479

#if __LINUX_ARM_ARCH__ < 7
/* If the target arch was overridden, change it back: */
#ifdef CONFIG_CPU_32v6K
	.arch	armv6k
480
#else
481 482 483 484
	.arch	armv6
#endif
#endif /* __LINUX_ARM_ARCH__ < 7 */
#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
485 486
	b	__und_usr_unknown
#endif
487
 UNWIND(.fnend		)
488
ENDPROC(__und_usr)
489

L
Linus Torvalds 已提交
490 491 492 493 494 495 496
	@
	@ fallthrough to call_fpe
	@

/*
 * The out of line fixup for the ldrt above.
 */
497
	.pushsection .fixup, "ax"
498
4:	mov	pc, r9
499 500
	.popsection
	.pushsection __ex_table,"a"
501
	.long	1b, 4b
502
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
503 504 505
	.long	2b, 4b
	.long	3b, 4b
#endif
506
	.popsection

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * NEON is a special case that has to be handled here. Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them. Plus, there's
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode.
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = this threads thread_info structure.
 *  lr  = unrecognised instruction return address
 */
	@
	@ Fall-through from Thumb-2 __und_usr
	@
#ifdef CONFIG_NEON
	adr	r6, .LCneon_thumb_opcodes
	b	2f
#endif
call_fpe:
#ifdef CONFIG_NEON
	adr	r6, .LCneon_arm_opcodes
2:
	ldr	r7, [r6], #4			@ mask value
	cmp	r7, #0				@ end mask?
	beq	1f
	and	r8, r0, r7
	ldr	r7, [r6], #4			@ opcode bits matching in mask
	cmp	r8, r7				@ NEON instruction?
	bne	2b
	get_thread_info r10
	mov	r7, #1
	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
	b	do_vfp				@ let VFP handler handle this
1:
#endif
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
	moveq	pc, lr
	get_thread_info r10			@ get current thread
	and	r8, r0, #0x00000f00		@ mask out CP number
 THUMB(	lsr	r8, r8, #8		)
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
 ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
 THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
	@ Computed jump into the per-coprocessor branch table below.
 ARM(	add	pc, pc, r8, lsr #6	)
 THUMB(	lsl	r8, r8, #2		)
 THUMB(	add	pc, r8			)
	nop

	movw_pc	lr				@ CP#0
	W(b)	do_fpe				@ CP#1 (FPE)
	W(b)	do_fpe				@ CP#2 (FPE)
	movw_pc	lr				@ CP#3
#ifdef CONFIG_CRUNCH
	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
#else
	movw_pc	lr				@ CP#4
	movw_pc	lr				@ CP#5
	movw_pc	lr				@ CP#6
#endif
	movw_pc	lr				@ CP#7
	movw_pc	lr				@ CP#8
	movw_pc	lr				@ CP#9
#ifdef CONFIG_VFP
	W(b)	do_vfp				@ CP#10 (VFP)
	W(b)	do_vfp				@ CP#11 (VFP)
#else
	movw_pc	lr				@ CP#10 (VFP)
	movw_pc	lr				@ CP#11 (VFP)
#endif
	movw_pc	lr				@ CP#12
	movw_pc	lr				@ CP#13
	movw_pc	lr				@ CP#14 (Debug)
	movw_pc	lr				@ CP#15 (Control)

#ifdef NEED_CPU_ARCHITECTURE
	.align	2
.LCcpu_architecture:
	.word	__cpu_architecture
#endif

#ifdef CONFIG_NEON
	@ Mask/opcode pairs used by call_fpe to recognise NEON instructions;
	@ each table is terminated by an all-zero mask entry.
	.align	6

.LCneon_arm_opcodes:
	.word	0xfe000000			@ mask
	.word	0xf2000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf4000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode

.LCneon_thumb_opcodes:
	.word	0xef000000			@ mask
	.word	0xef000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf9000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode
#endif


do_fpe:
	enable_irq
	ldr	r4, .LCfp
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

652
	.pushsection .data
L
Linus Torvalds 已提交
653
ENTRY(fp_enter)
654
	.word	no_fp
655
	.popsection
L
Linus Torvalds 已提交
656

657 658 659
ENTRY(no_fp)
	mov	pc, lr
ENDPROC(no_fp)

__und_usr_unknown:
	enable_irq
	mov	r0, sp
	adr	lr, BSYM(ret_from_exception)
	b	do_undefinstr
ENDPROC(__und_usr_unknown)
L
Linus Torvalds 已提交
667 668 669

	.align	5
__pabt_usr:
R
Russell King 已提交
670
	usr_entry
671
	mov	r2, sp				@ regs
672
	pabt_helper
673
 UNWIND(.fnend		)
L
Linus Torvalds 已提交
674 675 676 677 678
	/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
679 680
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
L
Linus Torvalds 已提交
681 682 683
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
684
 UNWIND(.fnend		)
685 686
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	add	ip, r1, #TI_CPU_SAVE
	ldr	r3, [r2, #TI_TP_VALUE]
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
#ifdef CONFIG_CPU_USE_DOMAINS
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
	set_tls	r3, r4, r5
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	ldr	r7, [r2, #TI_TASK]
	ldr	r8, =__stack_chk_guard
	ldr	r7, [r7, #TSK_STACK_CANARY]
#endif
#ifdef CONFIG_CPU_USE_DOMAINS
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	mov	r5, r0
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	str	r7, [r8]
#endif
 THUMB(	mov	ip, r4			   )
	mov	r0, r5
 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 THUMB(	ldr	sp, [ip], #4		   )
 THUMB(	ldr	pc, [ip]		   )
 UNWIND(.fnend		)
ENDPROC(__switch_to)
L
Linus Torvalds 已提交
730 731

	__INIT
732 733 734 735 736 737 738 739 740

/*
 * User helpers.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
741
 * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
742
 */
743
 THUMB(	.arm	)
744

745 746 747 748 749 750 751 752
	.macro	usr_ret, reg
#ifdef CONFIG_ARM_THUMB
	bx	\reg
#else
	mov	pc, \reg
#endif
	.endm

753 754 755 756
	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:

757
/*
758 759
 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
760 761
 */

__kuser_cmpxchg64:				@ 0xffff0f60

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	ldr	r7, 1f			@ it's 20 bits
	swi	__ARM_NR_cmpxchg64
	ldmfd	sp!, {r7, pc}
1:	.word	__ARM_NR_cmpxchg64

#elif defined(CONFIG_CPU_32v6K)

	stmfd	sp!, {r4, r5, r6, r7}
	ldrd	r4, r5, [r0]			@ load old val
	ldrd	r6, r7, [r1]			@ load new val
	smp_dmb	arm
1:	ldrexd	r0, r1, [r2]			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
	strexdeq r3, r6, r7, [r2]		@ store newval if eq
	teqeq	r3, #1				@ success?
	beq	1b				@ if no then retry
	smp_dmb	arm
	rsbs	r0, r3, #0			@ set returned val and C flag
	ldmfd	sp!, {r4, r5, r6, r7}
	usr_ret	lr

#elif !defined(CONFIG_SMP)

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg64
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle of
	 * the critical sequence.  The same strategy as for cmpxchg is used.
	 */
	stmfd	sp!, {r4, r5, r6, lr}
	ldmia	r0, {r4, r5}			@ load old val
	ldmia	r1, {r6, lr}			@ load new val
1:	ldmia	r2, {r0, r1}			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
2:	stmeqia	r2, {r6, lr}			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	ldmfd	sp!, {r4, r5, r6, pc}

	.text
kuser_cmpxchg64_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
#if __LINUX_ARM_ARCH__ < 6
	bcc	kuser_cmpxchg32_fixup
#endif
	mov	pc, lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else
#error "incoherent kernel configuration"
#endif

	/* pad to next slot */
	.rept	(16 - (. - __kuser_cmpxchg64)/4)
	.word	0
	.endr
847 848 849 850

	.align	5

__kuser_memory_barrier:				@ 0xffff0fa0
851
	smp_dmb	arm
852
	usr_ret	lr
853 854

	.align	5

__kuser_cmpxchg:				@ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	ldr	r7, 1f			@ it's 20 bits
	swi	__ARM_NR_cmpxchg
	ldmfd	sp!, {r7, pc}
1:	.word	__ARM_NR_cmpxchg

#elif __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg_fixup).
	 */
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	usr_ret	lr

	.text
kuser_cmpxchg32_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
	mov	pc, lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else

	smp_dmb	arm
1:	ldrex	r3, [r2]
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
	teqeq	r3, #1
	beq	1b
	rsbs	r0, r3, #0
	/* beware -- each __kuser slot must be 8 instructions max */
	ALT_SMP(b	__kuser_memory_barrier)
	ALT_UP(usr_ret	lr)

#endif

	.align	5

__kuser_get_tls:				@ 0xffff0fe0
	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
	usr_ret	lr
	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
	.rep	4
	.word	0			@ 0xffff0ff0 software TLS value, then
	.endr				@ pad up to __kuser_helper_version

__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:

943
 THUMB(	.thumb	)
944

L
Linus Torvalds 已提交
945 946 947
/*
 * Vector stubs.
 *
948 949 950
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not
 * exceed 0x300 bytes.
L
Linus Torvalds 已提交
951 952 953
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
R
Russell King 已提交
954 955 956
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
L
Linus Torvalds 已提交
957
 */
958
	.macro	vector_stub, name, mode, correction=0
L
Linus Torvalds 已提交
959 960 961 962 963 964
	.align	5

vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif
R
Russell King 已提交
965 966 967 968 969 970

	@
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	@ (parent CPSR)
	@
	stmia	sp, {r0, lr}		@ save r0, lr
L
Linus Torvalds 已提交
971
	mrs	lr, spsr
R
Russell King 已提交
972 973
	str	lr, [sp, #8]		@ save spsr

L
Linus Torvalds 已提交
974
	@
R
Russell King 已提交
975
	@ Prepare for SVC32 mode.  IRQs remain disabled.
L
Linus Torvalds 已提交
976
	@
R
Russell King 已提交
977
	mrs	r0, cpsr
978
	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
R
Russell King 已提交
979
	msr	spsr_cxsf, r0
L
Linus Torvalds 已提交
980

R
Russell King 已提交
981 982 983 984
	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f
985 986
 THUMB(	adr	r0, 1f			)
 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
987
	mov	r0, sp
988
 ARM(	ldr	lr, [pc, lr, lsl #2]	)
R
Russell King 已提交
989
	movs	pc, lr			@ branch to handler in SVC mode
990
ENDPROC(vector_\name)
991 992 993 994

	.align	2
	@ handler addresses follow this label
1:
L
Linus Torvalds 已提交
995 996
	.endm

997
	.globl	__stubs_start
L
Linus Torvalds 已提交
998 999 1000 1001
__stubs_start:
/*
 * Interrupt dispatcher
 */
1002
	vector_stub	irq, IRQ_MODE, 4
L
Linus Torvalds 已提交
1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
1025
	vector_stub	dabt, ABT_MODE, 8
L
Linus Torvalds 已提交
1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
1048
	vector_stub	pabt, ABT_MODE, 4
L
Linus Torvalds 已提交
1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070

	.long	__pabt_usr			@  0 (USR_26 / USR_32)
	.long	__pabt_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3 (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
1071
	vector_stub	und, UND_MODE
L
Linus Torvalds 已提交
1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123

	.long	__und_usr			@  0 (USR_26 / USR_32)
	.long	__und_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3 (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f

	.align	5

/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
vector_fiq:
	subs	pc, lr, #4

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
	b	vector_addrexcptn

/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
	.align	5

.LCvswi:
	.word	vector_swi

1124
	.globl	__stubs_end
L
Linus Torvalds 已提交
1125 1126
__stubs_end:

1127
	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start
L
Linus Torvalds 已提交
1128

1129 1130
	.globl	__vectors_start
__vectors_start:
1131 1132 1133 1134 1135 1136 1137 1138 1139 1140
 ARM(	swi	SYS_ERROR0	)
 THUMB(	svc	#0		)
 THUMB(	nop			)
	W(b)	vector_und + stubs_offset
	W(ldr)	pc, .LCvswi + stubs_offset
	W(b)	vector_pabt + stubs_offset
	W(b)	vector_dabt + stubs_offset
	W(b)	vector_addrexcptn + stubs_offset
	W(b)	vector_irq + stubs_offset
	W(b)	vector_fiq + stubs_offset
1141 1142 1143

	.globl	__vectors_end
__vectors_end:
L
Linus Torvalds 已提交
1144 1145 1146 1147 1148 1149 1150 1151 1152

	.data

	.globl	cr_alignment
	.globl	cr_no_alignment
cr_alignment:
	.space	4
cr_no_alignment:
	.space	4
1153 1154 1155 1156 1157 1158

#ifdef CONFIG_MULTI_IRQ_HANDLER
	.globl	handle_arch_irq
handle_arch_irq:
	.space	4
#endif