entry-armv.S 24.7 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5
/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
6
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
L
Linus Torvalds 已提交
7 8 9 10 11 12 13
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
14 15
 *  Note:  there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 *  that causes it to save wrong values...  Be aware!
L
Linus Torvalds 已提交
16 17
 */

18
#include <asm/memory.h>
19 20
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
L
Linus Torvalds 已提交
21
#include <asm/vfpmacros.h>
22
#include <mach/entry-macro.S>
23
#include <asm/thread_notify.h>
24
#include <asm/unwind.h>
25
#include <asm/unistd.h>
26
#include <asm/tls.h>
L
Linus Torvalds 已提交
27 28

#include "entry-header.S"
29
#include <asm/entry-macro-multi.S>
L
Linus Torvalds 已提交
30

31 32 33 34
/*
 * Interrupt handling.  Preserves r7, r8, r9
 *
 * Dispatches the interrupt either through the run-time registered
 * handle_arch_irq function pointer (CONFIG_MULTI_IRQ_HANDLER, when
 * non-NULL) or through the platform's compile-time
 * arch_irq_handler_default macro.  r0 = struct pt_regs * on entry
 * to the handler.
 */
	.macro	irq_handler
35 36 37 38 39 40 41
#ifdef CONFIG_MULTI_IRQ_HANDLER
	ldr	r5, =handle_arch_irq	@ r5 = &handle_arch_irq
	mov	r0, sp			@ arg0 = struct pt_regs *
	ldr	r5, [r5]		@ r5 = handle_arch_irq
	adr	lr, BSYM(9997f)		@ handler returns to 9997 below
	teq	r5, #0			@ run-time handler registered?
	movne	pc, r5			@ yes: tail-call it
42
#endif
43
	arch_irq_handler_default
44
9997:
45 46
	.endm

47 48 49 50 51 52
#ifdef CONFIG_KPROBES
	.section	.kprobes.text,"ax",%progbits
#else
	.text
#endif

L
Linus Torvalds 已提交
53 54 55
/*
 * Invalid mode handlers
 *
 * inv_entry: build a partial pt_regs frame on the SVC stack for an
 * exception taken from a mode we cannot handle, and load the \reason
 * code (BAD_*) into r1 for common_invalid/bad_mode.
 */
R
Russell King 已提交
56 57
	.macro	inv_entry, reason
	sub	sp, sp, #S_FRAME_SIZE		@ make room for pt_regs
58 59 60 61
 ARM(	stmib	sp, {r1 - lr}		)	@ save r1-lr above the r0 slot
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
 THUMB(	str	lr, [sp, #S_LR]		)
L
Linus Torvalds 已提交
62 63 64 65
	mov	r1, #\reason			@ r1 = BAD_* reason code
	.endm

@
@ Each __*_invalid entry builds a minimal frame via inv_entry with the
@ appropriate BAD_* reason and branches (or falls through) into
@ common_invalid, which completes the frame and calls bad_mode.
@
__pabt_invalid:
R
Russell King 已提交
66 67
	inv_entry BAD_PREFETCH
	b	common_invalid
68
ENDPROC(__pabt_invalid)
L
Linus Torvalds 已提交
69 70

__dabt_invalid:
R
Russell King 已提交
71 72
	inv_entry BAD_DATA
	b	common_invalid
73
ENDPROC(__dabt_invalid)
L
Linus Torvalds 已提交
74 75

__irq_invalid:
R
Russell King 已提交
76 77
	inv_entry BAD_IRQ
	b	common_invalid
78
ENDPROC(__irq_invalid)
L
Linus Torvalds 已提交
79 80

__und_invalid:
R
Russell King 已提交
81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98
	inv_entry BAD_UNDEFINSTR

	@
	@ XXX fall through to common_invalid
	@

@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
common_invalid:
	zero_fp

	ldmia	r0, {r4 - r6}		@ r0 points at the vector stub's
					@ mini-stack: r4-r6 = saved r0,
					@ lr_<exception>, spsr_<exception>
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"
L
Linus Torvalds 已提交
99 100 101

	mov	r0, sp			@ arg0 = struct pt_regs *
	b	bad_mode
102
ENDPROC(__und_invalid)
L
Linus Torvalds 已提交
103 104 105 106

/*
 * SVC mode handlers
 */
107 108 109 110 111 112 113

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif

114
/*
 * svc_entry - build a full pt_regs frame for an exception taken in
 * SVC mode.
 *
 * On entry r0 points at the mode-private mini-stack holding the
 * original {r0, lr_<exception>, spsr_<exception>} saved by the vector
 * stub.  \stack_hole reserves extra bytes above the frame (used by
 * kprobes).  The SPFIX() lines keep sp 8-byte aligned for the EABI.
 */
	.macro	svc_entry, stack_hole=0
115 116
 UNWIND(.fnstart		)
 UNWIND(.save {r0 - pc}		)
117 118 119 120 121 122 123
	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 SPFIX(	mov	r0, sp		)
 SPFIX(	tst	r0, #4		)	@ test original stack alignment
 SPFIX(	ldr	r0, [sp]	)	@ restored
#else
124
 SPFIX(	tst	sp, #4		)	@ test original stack alignment
125 126 127
#endif
 SPFIX(	subeq	sp, sp, #4	)	@ re-align sp to 8 bytes if needed
	stmia	sp, {r1 - r12}		@ save r1-r12 into the frame
R
Russell King 已提交
128 129

	ldmia	r0, {r1 - r3}		@ r1-r3 = saved r0, lr_<exception>,
					@ spsr_<exception> from the stub
130
	add	r5, sp, #S_SP - 4	@ here for interlock avoidance
R
Russell King 已提交
131
	mov	r4, #-1			@  ""  ""      ""       ""
132 133 134
	add	r0, sp, #(S_FRAME_SIZE + \stack_hole - 4)
 SPFIX(	addeq	r0, r0, #4	)	@ r0 = original sp_svc
	str	r1, [sp, #-4]!		@ save the "real" r0 copied
R
Russell King 已提交
135 136
					@ from the exception stack

L
Linus Torvalds 已提交
137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152
	mov	r1, lr			@ r1 = lr_svc

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r0 - sp_svc
	@  r1 - lr_svc
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - spsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stmia	r5, {r0 - r4}
	.endm

	.align	5
__dabt_svc:
R
Russell King 已提交
153
	svc_entry
L
Linus Torvalds 已提交
154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170

	@
	@ get ready to re-enable interrupts if appropriate
	@
	mrs	r9, cpsr
	tst	r3, #PSR_I_BIT
	biceq	r9, r9, #PSR_I_BIT

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context cpsr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
P
Paul Brook 已提交
171
#ifdef MULTI_DABORT
L
Linus Torvalds 已提交
172 173
	ldr	r4, .LCprocfns
	mov	lr, pc
P
Paul Brook 已提交
174
	ldr	pc, [r4, #PROCESSOR_DABT_FUNC]
L
Linus Torvalds 已提交
175
#else
P
Paul Brook 已提交
176
	bl	CPU_DABORT_HANDLER
L
Linus Torvalds 已提交
177 178 179 180 181
#endif

	@
	@ set desired IRQ state, then call main handler
	@
182
	debug_entry r1
L
Linus Torvalds 已提交
183 184 185 186 187 188 189
	msr	cpsr_c, r9
	mov	r2, sp
	bl	do_DataAbort

	@
	@ IRQs off again before pulling preserved data off the stack
	@
190
	disable_irq_notrace
L
Linus Torvalds 已提交
191 192 193 194

	@
	@ restore SPSR and restart the instruction
	@
195 196
	ldr	r2, [sp, #S_PSR]
	svc_exit r2				@ return from exception
197
 UNWIND(.fnend		)
198
ENDPROC(__dabt_svc)
L
Linus Torvalds 已提交
199 200 201

	.align	5
__irq_svc:
R
Russell King 已提交
202 203
	svc_entry

204 205 206
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
L
Linus Torvalds 已提交
207
#ifdef CONFIG_PREEMPT
208 209 210 211
	get_thread_info tsk
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	add	r7, r8, #1			@ increment it
	str	r7, [tsk, #TI_PREEMPT]
L
Linus Torvalds 已提交
212
#endif
R
Russell King 已提交
213

214
	irq_handler
L
Linus Torvalds 已提交
215
#ifdef CONFIG_PREEMPT
R
Russell King 已提交
216
	str	r8, [tsk, #TI_PREEMPT]		@ restore preempt count
217
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
R
Russell King 已提交
218 219
	teq	r8, #0				@ if preempt count != 0
	movne	r0, #0				@ force flags to 0
L
Linus Torvalds 已提交
220 221 222
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
#endif
223
	ldr	r4, [sp, #S_PSR]		@ irqs are already disabled
R
Russell King 已提交
224
#ifdef CONFIG_TRACE_IRQFLAGS
225
	tst	r4, #PSR_I_BIT
R
Russell King 已提交
226 227
	bleq	trace_hardirqs_on
#endif
228
	svc_exit r4				@ return from exception
229
 UNWIND(.fnend		)
230
ENDPROC(__irq_svc)
L
Linus Torvalds 已提交
231 232 233 234 235

	.ltorg

#ifdef CONFIG_PREEMPT
/*
 * svc_preempt - called from __irq_svc when _TIF_NEED_RESCHED is set.
 * Loops on preempt_schedule_irq() until the reschedule flag is clear,
 * then returns to the caller's lr preserved in r8.
 */
svc_preempt:
R
Russell King 已提交
236
	mov	r8, lr				@ preserve return address
L
Linus Torvalds 已提交
237
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
238
	ldr	r0, [tsk, #TI_FLAGS]		@ get new tasks TI_FLAGS
L
Linus Torvalds 已提交
239
	tst	r0, #_TIF_NEED_RESCHED
R
Russell King 已提交
240
	moveq	pc, r8				@ done: no reschedule pending
L
Linus Torvalds 已提交
241 242 243 244 245
	b	1b
#endif

	.align	5
__und_svc:
246 247 248 249 250 251
#ifdef CONFIG_KPROBES
	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
	@ it obviously needs free stack space which then will belong to
	@ the saved context.
	svc_entry 64
#else
R
Russell King 已提交
252
	svc_entry
253
#endif
L
Linus Torvalds 已提交
254 255 256 257 258 259 260 261

	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	@  r0 - instruction
	@
262
#ifndef	CONFIG_THUMB2_KERNEL
L
Linus Torvalds 已提交
263
	ldr	r0, [r2, #-4]
264 265 266 267 268 269 270
#else
	ldrh	r0, [r2, #-2]			@ Thumb instruction at LR - 2
	and	r9, r0, #0xf800
	cmp	r9, #0xe800			@ 32-bit instruction if xx >= 0
	ldrhhs	r9, [r2]			@ bottom 16 bits
	orrhs	r0, r9, r0, lsl #16
#endif
271
	adr	r9, BSYM(1f)
L
Linus Torvalds 已提交
272 273 274 275 276 277 278 279
	bl	call_fpe

	mov	r0, sp				@ struct pt_regs *regs
	bl	do_undefinstr

	@
	@ IRQs off again before pulling preserved data off the stack
	@
280
1:	disable_irq_notrace
L
Linus Torvalds 已提交
281 282 283 284

	@
	@ restore SPSR and restart the instruction
	@
285 286
	ldr	r2, [sp, #S_PSR]		@ Get SVC cpsr
	svc_exit r2				@ return from exception
287
 UNWIND(.fnend		)
288
ENDPROC(__und_svc)
L
Linus Torvalds 已提交
289 290 291

	.align	5
__pabt_svc:
R
Russell King 已提交
292
	svc_entry
L
Linus Torvalds 已提交
293 294 295 296 297 298 299 300

	@
	@ re-enable interrupts if appropriate
	@
	mrs	r9, cpsr
	tst	r3, #PSR_I_BIT
	biceq	r9, r9, #PSR_I_BIT

P
Paul Brook 已提交
301
	mov	r0, r2			@ pass address of aborted instruction.
302
#ifdef MULTI_PABORT
P
Paul Brook 已提交
303 304 305 306
	ldr	r4, .LCprocfns
	mov	lr, pc
	ldr	pc, [r4, #PROCESSOR_PABT_FUNC]
#else
307
	bl	CPU_PABORT_HANDLER
P
Paul Brook 已提交
308
#endif
309
	debug_entry r1
P
Paul Brook 已提交
310
	msr	cpsr_c, r9			@ Maybe enable interrupts
311
	mov	r2, sp				@ regs
L
Linus Torvalds 已提交
312 313 314 315 316
	bl	do_PrefetchAbort		@ call abort handler

	@
	@ IRQs off again before pulling preserved data off the stack
	@
317
	disable_irq_notrace
L
Linus Torvalds 已提交
318 319 320 321

	@
	@ restore SPSR and restart the instruction
	@
322 323
	ldr	r2, [sp, #S_PSR]
	svc_exit r2				@ return from exception
324
 UNWIND(.fnend		)
325
ENDPROC(__pabt_svc)
L
Linus Torvalds 已提交
326 327

	.align	5
328 329
.LCcralign:
	.word	cr_alignment
P
Paul Brook 已提交
330
#ifdef MULTI_DABORT
L
Linus Torvalds 已提交
331 332 333 334 335 336 337 338
.LCprocfns:
	.word	processor
#endif
.LCfp:
	.word	fp_enter

/*
 * User mode handlers
339 340
 *
 * EABI note: sp_svc is always 64-bit aligned here, so should S_FRAME_SIZE
L
Linus Torvalds 已提交
341
 */
342 343 344 345 346

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

R
Russell King 已提交
347
	.macro	usr_entry
	@ Build a full pt_regs frame for an exception taken in USR mode.
	@ On entry r0 points at the mode-private mini-stack holding the
	@ original {r0, lr_<exception>, spsr_<exception>} saved by the
	@ vector stub.
348 349
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)	@ don't unwind the user space
R
Russell King 已提交
350
	sub	sp, sp, #S_FRAME_SIZE		@ make room for pt_regs
351 352
 ARM(	stmib	sp, {r1 - r12}	)		@ save r1-r12 above the r0 slot
 THUMB(	stmia	sp, {r0 - r12}	)
R
Russell King 已提交
353 354 355 356 357 358 359

	ldmia	r0, {r1 - r3}		@ r1-r3 = saved r0, lr_<exception>,
					@ spsr_<exception> from the stub
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r4, #-1			@  ""  ""     ""        ""

	str	r1, [sp]		@ save the "real" r0 copied
					@ from the exception stack
L
Linus Torvalds 已提交
360 361 362 363 364 365 366 367 368 369

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - spsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
R
Russell King 已提交
370
	stmia	r0, {r2 - r4}
371 372
 ARM(	stmdb	r0, {sp, lr}^			)	@ ^ = user-mode banked sp/lr
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)
L
Linus Torvalds 已提交
373 374 375 376

	@
	@ Enable the alignment trap while in kernel mode
	@
377
	alignment_trap r0
L
Linus Torvalds 已提交
378 379 380 381 382 383 384

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp
	.endm

385 386 387 388 389 390 391 392 393 394 395 396 397 398 399
	.macro	kuser_cmpxchg_check
	@ Quick inline check used on USR-mode abort/IRQ entry: if the
	@ interrupted pc (r2) is >= TASK_SIZE it may lie inside the kuser
	@ cmpxchg helper's critical section, so call kuser_cmpxchg_fixup
	@ to restart it if needed.
#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
	@ Make sure our user space atomic helper is restarted
	@ if it was interrupted in a critical region.  Here we
	@ perform a quick test inline since it should be false
	@ 99.9999% of the time.  The rest is done out of line.
	cmp	r2, #TASK_SIZE
	blhs	kuser_cmpxchg_fixup
#endif
#endif
	.endm

L
Linus Torvalds 已提交
400 401
	.align	5
__dabt_usr:
R
Russell King 已提交
402
	usr_entry
403
	kuser_cmpxchg_check
L
Linus Torvalds 已提交
404 405 406 407 408 409 410 411 412 413

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context cpsr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.
	@
P
Paul Brook 已提交
414
#ifdef MULTI_DABORT
L
Linus Torvalds 已提交
415 416
	ldr	r4, .LCprocfns
	mov	lr, pc
P
Paul Brook 已提交
417
	ldr	pc, [r4, #PROCESSOR_DABT_FUNC]
L
Linus Torvalds 已提交
418
#else
P
Paul Brook 已提交
419
	bl	CPU_DABORT_HANDLER
L
Linus Torvalds 已提交
420 421 422 423 424
#endif

	@
	@ IRQs on, then call the main handler
	@
425
	debug_entry r1
426
	enable_irq
L
Linus Torvalds 已提交
427
	mov	r2, sp
428
	adr	lr, BSYM(ret_from_exception)
L
Linus Torvalds 已提交
429
	b	do_DataAbort
430
 UNWIND(.fnend		)
431
ENDPROC(__dabt_usr)
L
Linus Torvalds 已提交
432 433 434

	.align	5
__irq_usr:
R
Russell King 已提交
435
	usr_entry
436
	kuser_cmpxchg_check
L
Linus Torvalds 已提交
437

438
	get_thread_info tsk
L
Linus Torvalds 已提交
439
#ifdef CONFIG_PREEMPT
440 441 442
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	add	r7, r8, #1			@ increment it
	str	r7, [tsk, #TI_PREEMPT]
L
Linus Torvalds 已提交
443
#endif
R
Russell King 已提交
444

445
	irq_handler
L
Linus Torvalds 已提交
446
#ifdef CONFIG_PREEMPT
447 448
	ldr	r0, [tsk, #TI_PREEMPT]
	str	r8, [tsk, #TI_PREEMPT]
L
Linus Torvalds 已提交
449
	teq	r0, r7
450 451 452
 ARM(	strne	r0, [r0, -r0]	)
 THUMB(	movne	r0, #0		)
 THUMB(	strne	r0, [r0]	)
L
Linus Torvalds 已提交
453
#endif
R
Russell King 已提交
454

L
Linus Torvalds 已提交
455 456
	mov	why, #0
	b	ret_to_user
457
 UNWIND(.fnend		)
458
ENDPROC(__irq_usr)
L
Linus Torvalds 已提交
459 460 461 462 463

	.ltorg

	.align	5
__und_usr:
R
Russell King 已提交
464
	usr_entry
L
Linus Torvalds 已提交
465 466 467 468 469 470 471 472

	@
	@ fall through to the emulation code, which returns using r9 if
	@ it has emulated the instruction, or the more conventional lr
	@ if we are to treat this as a real undefined instruction
	@
	@  r0 - instruction
	@
473 474
	adr	r9, BSYM(ret_from_exception)
	adr	lr, BSYM(__und_usr_unknown)
475
	tst	r3, #PSR_T_BIT			@ Thumb mode?
476
	itet	eq				@ explicit IT needed for the 1f label
477 478 479
	subeq	r4, r2, #4			@ ARM instr at LR - 4
	subne	r4, r2, #2			@ Thumb instr at LR - 2
1:	ldreqt	r0, [r4]
480 481 482
#ifdef CONFIG_CPU_ENDIAN_BE8
	reveq	r0, r0				@ little endian instruction
#endif
483 484 485
	beq	call_fpe
	@ Thumb instruction
#if __LINUX_ARM_ARCH__ >= 7
486 487 488 489
2:
 ARM(	ldrht	r5, [r4], #2	)
 THUMB(	ldrht	r5, [r4]	)
 THUMB(	add	r4, r4, #2	)
490 491 492 493 494 495 496 497 498
	and	r0, r5, #0xf800			@ mask bits 111x x... .... ....
	cmp	r0, #0xe800			@ 32bit instruction if xx != 0
	blo	__und_usr_unknown
3:	ldrht	r0, [r4]
	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
	orr	r0, r0, r5, lsl #16
#else
	b	__und_usr_unknown
#endif
499
 UNWIND(.fnend		)
500
ENDPROC(__und_usr)
501

L
Linus Torvalds 已提交
502 503 504 505 506 507 508
	@
	@ fallthrough to call_fpe
	@

/*
 * The out of line fixup for the ldrt above.
 */
509
	.pushsection .fixup, "ax"
510
4:	mov	pc, r9
511 512
	.popsection
	.pushsection __ex_table,"a"
513 514 515 516 517
	.long	1b, 4b
#if __LINUX_ARM_ARCH__ >= 7
	.long	2b, 4b
	.long	3b, 4b
#endif
518
	.popsection
L
Linus Torvalds 已提交
519 520 521 522 523 524 525 526 527 528 529

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
530 531 532 533 534 535 536
 * NEON is a special case that has to be handled here. Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them. Plus, there's
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
L
Linus Torvalds 已提交
537 538 539
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode.
 *  r2  = PC+4
540
 *  r9  = normal "successful" return address
L
Linus Torvalds 已提交
541
 *  r10 = this threads thread_info structure.
542
 *  lr  = unrecognised instruction return address
L
Linus Torvalds 已提交
543
 */
544 545 546 547 548 549 550
	@
	@ Fall-through from Thumb-2 __und_usr
	@
#ifdef CONFIG_NEON
	adr	r6, .LCneon_thumb_opcodes
	b	2f
#endif
L
Linus Torvalds 已提交
551
call_fpe:
552
#ifdef CONFIG_NEON
553
	adr	r6, .LCneon_arm_opcodes
554 555 556 557 558 559 560 561 562 563 564 565 566 567 568
2:
	ldr	r7, [r6], #4			@ mask value
	cmp	r7, #0				@ end mask?
	beq	1f
	and	r8, r0, r7
	ldr	r7, [r6], #4			@ opcode bits matching in mask
	cmp	r8, r7				@ NEON instruction?
	bne	2b
	get_thread_info r10
	mov	r7, #1
	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
	b	do_vfp				@ let VFP handler handle this
1:
#endif
L
Linus Torvalds 已提交
569
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
570
	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
L
Linus Torvalds 已提交
571 572 573 574 575 576 577
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
	and	r8, r0, #0x0f000000		@ mask out op-code bits
	teqne	r8, #0x0f000000			@ SWI (ARM6/7 bug)?
#endif
	moveq	pc, lr
	get_thread_info r10			@ get current thread
	and	r8, r0, #0x00000f00		@ mask out CP number
578
 THUMB(	lsr	r8, r8, #8		)
L
Linus Torvalds 已提交
579 580
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
581 582
 ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
 THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
L
Linus Torvalds 已提交
583 584 585 586 587 588 589
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
590 591 592 593 594
 ARM(	add	pc, pc, r8, lsr #6	)
 THUMB(	lsl	r8, r8, #2		)
 THUMB(	add	pc, r8			)
	nop

595
	movw_pc	lr				@ CP#0
596 597
	W(b)	do_fpe				@ CP#1 (FPE)
	W(b)	do_fpe				@ CP#2 (FPE)
598
	movw_pc	lr				@ CP#3
599 600 601 602 603
#ifdef CONFIG_CRUNCH
	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
#else
604 605 606
	movw_pc	lr				@ CP#4
	movw_pc	lr				@ CP#5
	movw_pc	lr				@ CP#6
607
#endif
608 609 610
	movw_pc	lr				@ CP#7
	movw_pc	lr				@ CP#8
	movw_pc	lr				@ CP#9
L
Linus Torvalds 已提交
611
#ifdef CONFIG_VFP
612 613
	W(b)	do_vfp				@ CP#10 (VFP)
	W(b)	do_vfp				@ CP#11 (VFP)
L
Linus Torvalds 已提交
614
#else
615 616
	movw_pc	lr				@ CP#10 (VFP)
	movw_pc	lr				@ CP#11 (VFP)
L
Linus Torvalds 已提交
617
#endif
618 619 620 621
	movw_pc	lr				@ CP#12
	movw_pc	lr				@ CP#13
	movw_pc	lr				@ CP#14 (Debug)
	movw_pc	lr				@ CP#15 (Control)
L
Linus Torvalds 已提交
622

623 624 625
#ifdef CONFIG_NEON
	.align	6

626
.LCneon_arm_opcodes:
627 628 629 630 631 632
	.word	0xfe000000			@ mask
	.word	0xf2000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf4000000			@ opcode

633 634 635 636 637 638 639 640 641 642
	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode

.LCneon_thumb_opcodes:
	.word	0xef000000			@ mask
	.word	0xef000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf9000000			@ opcode

643 644 645 646
	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode
#endif

L
Linus Torvalds 已提交
647
/*
 * do_fpe - hand an FP coprocessor instruction to the FP handler whose
 * USR entry point is stored in fp_enter.  r10 = thread_info (set up by
 * call_fpe); advanced here to point at the thread's FP state.
 */
do_fpe:
648
	enable_irq
L
Linus Torvalds 已提交
649 650 651 652 653 654 655 656 657 658 659 660 661
	ldr	r4, .LCfp			@ r4 = &fp_enter
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

662
	.pushsection .data
L
Linus Torvalds 已提交
663
ENTRY(fp_enter)
664
	.word	no_fp			@ default target: no FP support
665
	.popsection
L
Linus Torvalds 已提交
666

667 668 669
/* no_fp - default fp_enter target: simply return to the caller. */
ENTRY(no_fp)
	mov	pc, lr
ENDPROC(no_fp)
670 671

/*
 * __und_usr_unknown - the undefined instruction could not be emulated;
 * enable IRQs and hand it to do_undefinstr(), which returns via
 * ret_from_exception.
 */
__und_usr_unknown:
672
	enable_irq
L
Linus Torvalds 已提交
673
	mov	r0, sp				@ arg0 = struct pt_regs *
674
	adr	lr, BSYM(ret_from_exception)
L
Linus Torvalds 已提交
675
	b	do_undefinstr
676
ENDPROC(__und_usr_unknown)
L
Linus Torvalds 已提交
677 678 679

	.align	5
__pabt_usr:
R
Russell King 已提交
680
	usr_entry
L
Linus Torvalds 已提交
681

P
Paul Brook 已提交
682
	mov	r0, r2			@ pass address of aborted instruction.
683
#ifdef MULTI_PABORT
P
Paul Brook 已提交
684 685 686 687
	ldr	r4, .LCprocfns
	mov	lr, pc
	ldr	pc, [r4, #PROCESSOR_PABT_FUNC]
#else
688
	bl	CPU_PABORT_HANDLER
P
Paul Brook 已提交
689
#endif
690
	debug_entry r1
691
	enable_irq				@ Enable interrupts
692
	mov	r2, sp				@ regs
L
Linus Torvalds 已提交
693
	bl	do_PrefetchAbort		@ call abort handler
694
 UNWIND(.fnend		)
L
Linus Torvalds 已提交
695 696 697 698 699
	/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
700 701
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
L
Linus Torvalds 已提交
702 703 704
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
705
 UNWIND(.fnend		)
706 707
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)
L
Linus Torvalds 已提交
708 709 710 711 712 713 714

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 *
 * Saves the callee-saved registers into prev's thread_info cpu_context,
 * switches TLS (and domain/stack-canary state where configured), fires
 * the THREAD_NOTIFY_SWITCH notifier chain, then reloads the next
 * thread's saved registers -- loading pc last resumes it.  Returns prev
 * (r0) to the new thread.
 */
ENTRY(__switch_to)
715 716
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
L
Linus Torvalds 已提交
717 718
	add	ip, r1, #TI_CPU_SAVE		@ ip = &prev->cpu_context
	ldr	r3, [r2, #TI_TP_VALUE]		@ r3 = next's TLS value
719 720 721 722
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
723
#ifdef CONFIG_CPU_USE_DOMAINS
724
	ldr	r6, [r2, #TI_CPU_DOMAIN]	@ r6 = next's domain value
725
#endif
726
	set_tls	r3, r4, r5
727 728 729 730 731
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	ldr	r7, [r2, #TI_TASK]
	ldr	r8, =__stack_chk_guard
	ldr	r7, [r7, #TSK_STACK_CANARY]	@ r7 = next task's stack canary
#endif
732
#ifdef CONFIG_CPU_USE_DOMAINS
L
Linus Torvalds 已提交
733 734
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
735 736 737 738 739
	mov	r5, r0				@ preserve prev task ptr ...
	add	r4, r2, #TI_CPU_SAVE		@ ... and &next->cpu_context
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain	@ may clobber r0-r3, ip, lr
740 741 742
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	str	r7, [r8]			@ publish next's canary
#endif
743
 THUMB(	mov	ip, r4			   )
744
	mov	r0, r5				@ return value = prev task_struct
745 746 747 748
 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 THUMB(	ldr	sp, [ip], #4		   )
 THUMB(	ldr	pc, [ip]		   )
749
 UNWIND(.fnend		)
750
ENDPROC(__switch_to)
L
Linus Torvalds 已提交
751 752

	__INIT
753 754 755 756 757 758 759 760 761

/*
 * User helpers.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
762
 * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
763
 */
764
 THUMB(	.arm	)
765

766 767 768 769 770 771 772 773
	.macro	usr_ret, reg
	@ Return to user space via \reg, using bx for Thumb interworking
	@ when CONFIG_ARM_THUMB is enabled.
#ifdef CONFIG_ARM_THUMB
	bx	\reg
#else
	mov	pc, \reg
#endif
	.endm

774 775 776 777
	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:

778
__kuser_memory_barrier:				@ 0xffff0fa0
779
	smp_dmb	arm
780
	usr_ret	lr
781 782 783

	.align	5

784 785
__kuser_cmpxchg:				@ 0xffff0fc0

786
#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
787

788 789 790 791 792
	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
793
	stmfd	sp!, {r7, lr}
794
	ldr	r7, 1f			@ it's 20 bits
795
	swi	__ARM_NR_cmpxchg
796
	ldmfd	sp!, {r7, pc}
797
1:	.word	__ARM_NR_cmpxchg
798 799

#elif __LINUX_ARM_ARCH__ < 6
800

801 802
#ifdef CONFIG_MMU

803
	/*
804 805 806 807 808 809 810
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg_fixup).
811
	 */
812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	usr_ret	lr

	.text
kuser_cmpxchg_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r2 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r2 >= 1b and r2 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	subs	r8, r2, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
	mov	pc, lr
	.previous

833 834 835 836
#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
837
	usr_ret	lr
838
#endif
839 840 841

#else

842
	smp_dmb	arm
843
1:	ldrex	r3, [r2]
844 845
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
846 847
	teqeq	r3, #1
	beq	1b
848
	rsbs	r0, r3, #0
849
	/* beware -- each __kuser slot must be 8 instructions max */
850 851
	ALT_SMP(b	__kuser_memory_barrier)
	ALT_UP(usr_ret	lr)
852 853 854 855 856 857

#endif

	.align	5

/*
 * __kuser_get_tls - return the current TLS value in r0.  The ldr below
 * is set up by kuser_get_tls_init (per its comment) to deliver either
 * the hardware TLS register (mrc at 0xffff0fe8) or the software TLS
 * word stored at 0xffff0ff0.
 */
__kuser_get_tls:				@ 0xffff0fe0
858
	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
859
	usr_ret	lr
860 861 862 863
	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
	.rep	4
	.word	0			@ 0xffff0ff0 software TLS value, then
	.endr				@ pad up to __kuser_helper_version
864 865 866 867 868 869 870

__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:

871
 THUMB(	.thumb	)
872

L
Linus Torvalds 已提交
873 874 875
/*
 * Vector stubs.
 *
876 877 878
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not
 * exceed 0x300 bytes.
L
Linus Torvalds 已提交
879 880 881
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
R
Russell King 已提交
882 883 884
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
L
Linus Torvalds 已提交
885
 */
886
	.macro	vector_stub, name, mode, correction=0
L
Linus Torvalds 已提交
887 888 889 890 891 892
	.align	5

vector_\name:
	.if \correction
	sub	lr, lr, #\correction	@ fix up lr to the return/restart pc
	.endif
R
Russell King 已提交
893 894 895 896 897 898

	@
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	@ (parent CPSR)
	@
	stmia	sp, {r0, lr}		@ save r0, lr
L
Linus Torvalds 已提交
899
	mrs	lr, spsr
R
Russell King 已提交
900 901
	str	lr, [sp, #8]		@ save spsr

L
Linus Torvalds 已提交
902
	@
R
Russell King 已提交
903
	@ Prepare for SVC32 mode.  IRQs remain disabled.
L
Linus Torvalds 已提交
904
	@
R
Russell King 已提交
905
	mrs	r0, cpsr
906
	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)	@ flip \mode -> SVC
R
Russell King 已提交
907
	msr	spsr_cxsf, r0
L
Linus Torvalds 已提交
908

R
Russell King 已提交
909 910 911 912
	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f		@ table index = parent CPSR mode bits
913 914
 THUMB(	adr	r0, 1f			)
 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
915
	mov	r0, sp			@ pass mode-private stack to handler
916
 ARM(	ldr	lr, [pc, lr, lsl #2]	)
R
Russell King 已提交
917
	movs	pc, lr			@ branch to handler in SVC mode
918
ENDPROC(vector_\name)
919 920 921 922

	.align	2
	@ handler addresses follow this label
1:
L
Linus Torvalds 已提交
923 924
	.endm

925
	.globl	__stubs_start
L
Linus Torvalds 已提交
926 927 928 929
__stubs_start:
/*
 * Interrupt dispatcher
 */
930
	vector_stub	irq, IRQ_MODE, 4
L
Linus Torvalds 已提交
931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
953
	vector_stub	dabt, ABT_MODE, 8
L
Linus Torvalds 已提交
954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
976
	vector_stub	pabt, ABT_MODE, 4
L
Linus Torvalds 已提交
977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998

	.long	__pabt_usr			@  0 (USR_26 / USR_32)
	.long	__pabt_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3 (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
999
	vector_stub	und, UND_MODE
L
Linus Torvalds 已提交
1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052

	.long	__und_usr			@  0 (USR_26 / USR_32)
	.long	__und_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3 (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f

	.align	5

/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
vector_fiq:
	disable_fiq
	subs	pc, lr, #4

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
	b	vector_addrexcptn

/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
	.align	5

.LCvswi:
	.word	vector_swi

1053
	.globl	__stubs_end
L
Linus Torvalds 已提交
1054 1055
__stubs_end:

1056
	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start
L
Linus Torvalds 已提交
1057

1058 1059
	.globl	__vectors_start
__vectors_start:
1060 1061 1062 1063 1064 1065 1066 1067 1068 1069
 ARM(	swi	SYS_ERROR0	)
 THUMB(	svc	#0		)
 THUMB(	nop			)
	W(b)	vector_und + stubs_offset
	W(ldr)	pc, .LCvswi + stubs_offset
	W(b)	vector_pabt + stubs_offset
	W(b)	vector_dabt + stubs_offset
	W(b)	vector_addrexcptn + stubs_offset
	W(b)	vector_irq + stubs_offset
	W(b)	vector_fiq + stubs_offset
1070 1071 1072

	.globl	__vectors_end
__vectors_end:
L
Linus Torvalds 已提交
1073 1074 1075 1076 1077 1078 1079 1080 1081

	.data

	.globl	cr_alignment
	.globl	cr_no_alignment
cr_alignment:
	.space	4
cr_no_alignment:
	.space	4
1082 1083 1084 1085 1086 1087

#ifdef CONFIG_MULTI_IRQ_HANDLER
	.globl	handle_arch_irq
handle_arch_irq:
	.space	4
#endif