/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 *  Note:  there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 *  that causes it to save wrong values...  Be aware!
 */

#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#ifndef CONFIG_MULTI_IRQ_HANDLER
#include <mach/entry-macro.S>
#endif
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>
#include <asm/system_info.h>

#include "entry-header.S"
#include <asm/entry-macro-multi.S>

/*
 * Interrupt handling.
 */
	.macro	irq_handler
#ifdef CONFIG_MULTI_IRQ_HANDLER
	ldr	r1, =handle_arch_irq
	mov	r0, sp
	adr	lr, BSYM(9997f)
	ldr	pc, [r1]
#else
	arch_irq_handler_default
#endif
9997:
	.endm
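/*
 * Note: with CONFIG_MULTI_IRQ_HANDLER the dispatch above is roughly the
 * C call (*handle_arch_irq)(regs); the pointer is installed at boot via
 * set_handle_irq().  The 9997 local label gives both paths a common
 * return point.
 */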

	.macro	pabt_helper
	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
#ifdef MULTI_PABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_PABT_FUNC]
#else
	bl	CPU_PABORT_HANDLER
#endif
	.endm

	.macro	dabt_helper

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - pt_regs
	@  r4 - aborted context pc
	@  r5 - aborted context psr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
#ifdef MULTI_DABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_DABT_FUNC]
#else
	bl	CPU_DABORT_HANDLER
#endif
	.endm

#ifdef CONFIG_KPROBES
	.section	.kprobes.text,"ax",%progbits
#else
	.text
#endif

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - lr}		)
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
 THUMB(	str	lr, [sp, #S_LR]		)
	mov	r1, #\reason
	.endm

__pabt_invalid:
	inv_entry BAD_PREFETCH
	b	common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
	inv_entry BAD_DATA
	b	common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
	inv_entry BAD_IRQ
	b	common_invalid
ENDPROC(__irq_invalid)

__und_invalid:
	inv_entry BAD_UNDEFINSTR

	@
	@ XXX fall through to common_invalid
	@

@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
common_invalid:
	zero_fp

	ldmia	r0, {r4 - r6}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"

	mov	r0, sp
	b	bad_mode
ENDPROC(__und_invalid)

/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif

	.macro	svc_entry, stack_hole=0, trace=1
 UNWIND(.fnstart		)
 UNWIND(.save {r0 - pc}		)
	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 SPFIX(	mov	r0, sp		)
 SPFIX(	tst	r0, #4		)	@ test original stack alignment
 SPFIX(	ldr	r0, [sp]	)	@ restored
#else
 SPFIX(	tst	sp, #4		)
#endif
 SPFIX(	subeq	sp, sp, #4	)
	stmia	sp, {r1 - r12}

	ldmia	r0, {r3 - r5}
	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""      ""       ""
	add	r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
 SPFIX(	addeq	r2, r2, #4	)
	str	r3, [sp, #-4]!		@ save the "real" r0 copied
					@ from the exception stack

	mov	r3, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - sp_svc
	@  r3 - lr_svc
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stmia	r7, {r2 - r6}

	.if \trace
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	.endif
	.endm
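/*
 * A note on SPFIX: the EABI requires an 8-byte aligned stack at public
 * interfaces.  svc_entry tests whether the SVC stack was only 4-byte
 * aligned when the exception arrived and, if so, drops an extra 4
 * bytes; the addeq on r2 makes the saved sp_svc reflect the original
 * value so the realignment is undone on exit.
 */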

	.align	5
__dabt_svc:
	svc_entry
	mov	r2, sp
	dabt_helper
 THUMB(	ldr	r5, [sp, #S_PSR]	)	@ potentially updated CPSR
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__dabt_svc)

	.align	5
__irq_svc:
	svc_entry
	irq_handler

#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	teq	r8, #0				@ if preempt count != 0
	movne	r0, #0				@ force flags to 0
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
#endif

	svc_exit r5, irq = 1			@ return from exception
 UNWIND(.fnend		)
ENDPROC(__irq_svc)

	.ltorg

#ifdef CONFIG_PREEMPT
svc_preempt:
	mov	r8, lr
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new task's TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	reteq	r8				@ go again
	b	1b
#endif

__und_fault:
	@ Correct the PC such that it is pointing at the instruction
	@ which caused the fault.  If the faulting instruction was ARM,
	@ the PC will be pointing at the next instruction and we have
	@ to subtract 4.  Otherwise, it is Thumb, and the PC will be
	@ pointing at the second half of the Thumb instruction.  We
	@ have to subtract 2.
	ldr	r2, [r0, #S_PC]
	sub	r2, r2, r1
	str	r2, [r0, #S_PC]
	b	do_undefinstr
ENDPROC(__und_fault)

	.align	5
__und_svc:
#ifdef CONFIG_KPROBES
	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
	@ it obviously needs free stack space which then will belong to
	@ the saved context.
	svc_entry 64
#else
	svc_entry
#endif
	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	@  r0 - instruction
	@
#ifndef CONFIG_THUMB2_KERNEL
	ldr	r0, [r4, #-4]
#else
	mov	r1, #2
	ldrh	r0, [r4, #-2]			@ Thumb instruction at LR - 2
	cmp	r0, #0xe800			@ 32-bit instruction if xx >= 0
	blo	__und_svc_fault
	ldrh	r9, [r4]			@ bottom 16 bits
	add	r4, r4, #2
	str	r4, [sp, #S_PC]
	orr	r0, r9, r0, lsl #16
#endif
	adr	r9, BSYM(__und_svc_finish)
	mov	r2, r4
	bl	call_fpe

	mov	r1, #4				@ PC correction to apply
__und_svc_fault:
	mov	r0, sp				@ struct pt_regs *regs
	bl	__und_fault

__und_svc_finish:
	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__und_svc)

	.align	5
__pabt_svc:
	svc_entry
	mov	r2, sp				@ regs
	pabt_helper
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__pabt_svc)

	.align	5
__fiq_svc:
	svc_entry trace=0
	mov	r0, sp				@ struct pt_regs *regs
	bl	handle_fiq_as_nmi
	svc_exit_via_fiq
 UNWIND(.fnend		)
ENDPROC(__fiq_svc)

	.align	5
.LCcralign:
	.word	cr_alignment
#ifdef MULTI_DABORT
.LCprocfns:
	.word	processor
#endif
.LCfp:
	.word	fp_enter

/*
 * Abort mode handlers
 */

@
@ Taking a FIQ in abort mode is similar to taking a FIQ in SVC mode
@ and reuses the same macros. However in abort mode we must also
@ save/restore lr_abt and spsr_abt to make nested aborts safe.
@
	.align 5
__fiq_abt:
	svc_entry trace=0

 ARM(	msr	cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )
	mov	r1, lr		@ Save lr_abt
	mrs	r2, spsr	@ Save spsr_abt, abort is now safe
 ARM(	msr	cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )
	stmfd	sp!, {r1 - r2}

	add	r0, sp, #8			@ struct pt_regs *regs
	bl	handle_fiq_as_nmi

	ldmfd	sp!, {r1 - r2}
 ARM(	msr	cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )
	mov	lr, r1		@ Restore lr_abt, abort is unsafe
	msr	spsr_cxsf, r2	@ Restore spsr_abt
 ARM(	msr	cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )

	svc_exit_via_fiq
 UNWIND(.fnend		)
ENDPROC(__fiq_abt)

/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so S_FRAME_SIZE
 * must be a multiple of 8 as well.
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

	.macro	usr_entry, trace=1
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)	@ don't unwind the user space
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)

	ldmia	r0, {r3 - r5}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""     ""        ""

	str	r3, [sp]		@ save the "real" r0 copied
					@ from the exception stack

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
	stmia	r0, {r4 - r6}
 ARM(	stmdb	r0, {sp, lr}^			)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)

	@
	@ Enable the alignment trap while in kernel mode
	@
	alignment_trap r0, .LCcralign

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp

	.if	\trace
#ifdef CONFIG_IRQSOFF_TRACER
	bl	trace_hardirqs_off
#endif
	ct_user_exit save = 0
	.endif
	.endm
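/*
 * On leaving usr_entry, sp points at a struct pt_regs holding r0-r15,
 * cpsr and orig_r0 -- the same frame layout the SVC-mode entry builds,
 * which is what allows the C exception handlers to be shared.
 */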

	.macro	kuser_cmpxchg_check
#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
    !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
	@ Make sure our user space atomic helper is restarted
	@ if it was interrupted in a critical region.  Here we
	@ perform a quick test inline since it should be false
	@ 99.9999% of the time.  The rest is done out of line.
	cmp	r4, #TASK_SIZE
	blhs	kuser_cmpxchg64_fixup
#endif
#endif
	.endm
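/*
 * Why this check exists: on pre-v6K CPUs the user cmpxchg helpers in
 * the vectors page are not hardware-atomic, so atomicity is ensured by
 * restarting a helper that was interrupted inside its critical region.
 * r4 holds the aborted PC; only addresses at or above TASK_SIZE can be
 * inside a helper, hence the cheap inline test with the rest out of
 * line.
 */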

	.align	5
__dabt_usr:
	usr_entry
	kuser_cmpxchg_check
	mov	r2, sp
	dabt_helper
	b	ret_from_exception
 UNWIND(.fnend		)
ENDPROC(__dabt_usr)

	.align	5
__irq_usr:
	usr_entry
	kuser_cmpxchg_check
	irq_handler
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user_from_irq
 UNWIND(.fnend		)
ENDPROC(__irq_usr)

	.ltorg

	.align	5
__und_usr:
	usr_entry

	mov	r2, r4
	mov	r3, r5

	@ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
	@      faulting instruction depending on Thumb mode.
	@ r3 = regs->ARM_cpsr
	@
	@ The emulation code returns using r9 if it has emulated the
	@ instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	adr	r9, BSYM(ret_from_exception)

	@ IRQs must be enabled before attempting to read the instruction from
	@ user space since that could cause a page/translation fault if the
	@ page table was modified by another CPU.
	enable_irq

	tst	r3, #PSR_T_BIT			@ Thumb mode?
	bne	__und_usr_thumb
	sub	r4, r2, #4			@ ARM instr at LR - 4
1:	ldrt	r0, [r4]
 ARM_BE8(rev	r0, r0)				@ little endian instruction

	@ r0 = 32-bit ARM instruction which caused the exception
	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
	@ r4 = PC value for the faulting instruction
	@ lr = 32-bit undefined instruction function
	adr	lr, BSYM(__und_usr_fault_32)
	b	call_fpe

__und_usr_thumb:
	@ Thumb instruction
	sub	r4, r2, #2			@ First half of thumb instr at LR - 2
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
/*
 * Thumb-2 instruction handling.  Note that because pre-v6 and >= v6 platforms
 * can never be supported in a single kernel, this code is not applicable at
 * all when __LINUX_ARM_ARCH__ < 6.  This allows simplifying assumptions to be
 * made about .arch directives.
 */
#if __LINUX_ARM_ARCH__ < 7
/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
#define NEED_CPU_ARCHITECTURE
	ldr	r5, .LCcpu_architecture
	ldr	r5, [r5]
	cmp	r5, #CPU_ARCH_ARMv7
	blo	__und_usr_fault_16		@ 16bit undefined instruction
/*
 * The following code won't get run unless the running CPU really is v7, so
 * coding round the lack of ldrht on older arches is pointless.  Temporarily
 * override the assembler target arch with the minimum required instead:
 */
	.arch	armv6t2
#endif
2:	ldrht	r5, [r4]
ARM_BE8(rev16	r5, r5)				@ little endian instruction
	cmp	r5, #0xe800			@ 32bit instruction if xx != 0
	blo	__und_usr_fault_16		@ 16bit undefined instruction
3:	ldrht	r0, [r2]
ARM_BE8(rev16	r0, r0)				@ little endian instruction
	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
	str	r2, [sp, #S_PC]			@ it's a 2x16bit instr, update
	orr	r0, r0, r5, lsl #16
	adr	lr, BSYM(__und_usr_fault_32)
	@ r0 = the two 16-bit Thumb instructions which caused the exception
	@ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
	@ r4 = PC value for the first 16-bit Thumb instruction
	@ lr = 32bit undefined instruction function

#if __LINUX_ARM_ARCH__ < 7
/* If the target arch was overridden, change it back: */
#ifdef CONFIG_CPU_32v6K
	.arch	armv6k
#else
	.arch	armv6
#endif
#endif /* __LINUX_ARM_ARCH__ < 7 */
#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
	b	__und_usr_fault_16
#endif
 UNWIND(.fnend)
ENDPROC(__und_usr)

/*
 * The out-of-line fixup for the ldrt instructions above.
 */
	.pushsection .fixup, "ax"
	.align	2
4:	str     r4, [sp, #S_PC]			@ retry current instruction
	ret	r9
	.popsection
	.pushsection __ex_table,"a"
	.long	1b, 4b
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
	.long	2b, 4b
	.long	3b, 4b
#endif
	.popsection
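/*
 * In outline: each __ex_table entry pairs a potentially faulting
 * instruction (1b, 2b, 3b) with its fixup (4b).  If a load from user
 * space faults, the fault handler redirects execution to the fixup,
 * which rewinds the saved PC so the undefined instruction is retried
 * once the fault has been resolved.
 */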

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * NEON is a special case that has to be handled here. Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them. Plus, there's
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
 *  r2  = PC value to resume execution after successful emulation
 *  r9  = normal "successful" return address
 *  r10 = this thread's thread_info structure
 *  lr  = unrecognised instruction return address
 * IRQs enabled, FIQs enabled.
 */
	@
	@ Fall-through from Thumb-2 __und_usr
	@
#ifdef CONFIG_NEON
	get_thread_info r10			@ get current thread
	adr	r6, .LCneon_thumb_opcodes
	b	2f
#endif
call_fpe:
	get_thread_info r10			@ get current thread
#ifdef CONFIG_NEON
	adr	r6, .LCneon_arm_opcodes
2:	ldr	r5, [r6], #4			@ mask value
	ldr	r7, [r6], #4			@ opcode bits matching in mask
	cmp	r5, #0				@ end mask?
	beq	1f
	and	r8, r0, r5
	cmp	r8, r7				@ NEON instruction?
	bne	2b
	mov	r7, #1
	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
	b	do_vfp				@ let VFP handler handle this
1:
#endif
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
	reteq	lr
	and	r8, r0, #0x00000f00		@ mask out CP number
 THUMB(	lsr	r8, r8, #8		)
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
 ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
 THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
 ARM(	add	pc, pc, r8, lsr #6	)
 THUMB(	lsl	r8, r8, #2		)
 THUMB(	add	pc, r8			)
	nop

	ret.w	lr				@ CP#0
	W(b)	do_fpe				@ CP#1 (FPE)
	W(b)	do_fpe				@ CP#2 (FPE)
	ret.w	lr				@ CP#3
#ifdef CONFIG_CRUNCH
	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
#else
	ret.w	lr				@ CP#4
	ret.w	lr				@ CP#5
	ret.w	lr				@ CP#6
#endif
	ret.w	lr				@ CP#7
	ret.w	lr				@ CP#8
	ret.w	lr				@ CP#9
#ifdef CONFIG_VFP
	W(b)	do_vfp				@ CP#10 (VFP)
	W(b)	do_vfp				@ CP#11 (VFP)
#else
	ret.w	lr				@ CP#10 (VFP)
	ret.w	lr				@ CP#11 (VFP)
#endif
	ret.w	lr				@ CP#12
	ret.w	lr				@ CP#13
	ret.w	lr				@ CP#14 (Debug)
	ret.w	lr				@ CP#15 (Control)
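/*
 * How the indexed branch above works (ARM case): r8 holds the CP
 * number in bits 8-11, so "r8, lsr #6" is CP# * 4, i.e. a word offset.
 * The pc reads as the address of the add plus 8, which is exactly the
 * start of the table (the nop fills the intervening slot), so
 * "add pc, pc, r8, lsr #6" lands on the entry for that coprocessor.
 */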

#ifdef NEED_CPU_ARCHITECTURE
	.align	2
.LCcpu_architecture:
	.word	__cpu_architecture
#endif

#ifdef CONFIG_NEON
	.align	6

.LCneon_arm_opcodes:
	.word	0xfe000000			@ mask
	.word	0xf2000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf4000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode

.LCneon_thumb_opcodes:
	.word	0xef000000			@ mask
	.word	0xef000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf9000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode
#endif

do_fpe:
	ldr	r4, .LCfp
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

	.pushsection .data
ENTRY(fp_enter)
	.word	no_fp
	.popsection

ENTRY(no_fp)
	ret	lr
ENDPROC(no_fp)

__und_usr_fault_32:
	mov	r1, #4
	b	1f
__und_usr_fault_16:
	mov	r1, #2
1:	mov	r0, sp
	adr	lr, BSYM(ret_from_exception)
	b	__und_fault
ENDPROC(__und_usr_fault_32)
ENDPROC(__und_usr_fault_16)

	.align	5
__pabt_usr:
	usr_entry
	mov	r2, sp				@ regs
	pabt_helper
 UNWIND(.fnend		)
	/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
 UNWIND(.fnend		)
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)

	.align	5
__fiq_usr:
	usr_entry trace=0
	kuser_cmpxchg_check
	mov	r0, sp				@ struct pt_regs *regs
	bl	handle_fiq_as_nmi
	get_thread_info tsk
	restore_user_regs fast = 0, offset = 0
 UNWIND(.fnend		)
ENDPROC(__fiq_usr)

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	add	ip, r1, #TI_CPU_SAVE
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
	ldr	r4, [r2, #TI_TP_VALUE]
	ldr	r5, [r2, #TI_TP_VALUE + 4]
#ifdef CONFIG_CPU_USE_DOMAINS
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
	switch_tls r1, r4, r5, r3, r7
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	ldr	r7, [r2, #TI_TASK]
	ldr	r8, =__stack_chk_guard
	ldr	r7, [r7, #TSK_STACK_CANARY]
#endif
#ifdef CONFIG_CPU_USE_DOMAINS
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	mov	r5, r0
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	str	r7, [r8]
#endif
 THUMB(	mov	ip, r4			   )
	mov	r0, r5
 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 THUMB(	ldr	sp, [ip], #4		   )
 THUMB(	ldr	pc, [ip]		   )
 UNWIND(.fnend		)
ENDPROC(__switch_to)
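/*
 * The stmia/ldmia pair above targets thread_info->cpu_context (struct
 * cpu_context_save in asm/thread_info.h), which holds just the
 * callee-saved state: r4-r9, sl, fp, sp and pc.  Everything else is
 * dead across the explicit call to __switch_to and need not be saved.
 */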

	__INIT

/*
 * User helpers.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
 */
 THUMB(	.arm	)

	.macro	usr_ret, reg
#ifdef CONFIG_ARM_THUMB
	bx	\reg
#else
	ret	\reg
#endif
	.endm

	.macro	kuser_pad, sym, size
	.if	(. - \sym) & 3
	.rept	4 - (. - \sym) & 3
	.byte	0
	.endr
	.endif
	.rept	(\size - (. - \sym)) / 4
	.word	0xe7fddef1
	.endr
	.endm
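/*
 * The 0xe7fddef1 filler decodes as an undefined instruction in both
 * ARM and Thumb state, so a stray jump into helper padding traps
 * rather than falling through to the next helper.
 */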

#ifdef CONFIG_KUSER_HELPERS
	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:

/*
 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
 */

__kuser_cmpxchg64:				@ 0xffff0f60

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	ldr	r7, 1f			@ it's 20 bits
	swi	__ARM_NR_cmpxchg64
	ldmfd	sp!, {r7, pc}
1:	.word	__ARM_NR_cmpxchg64

#elif defined(CONFIG_CPU_32v6K)

	stmfd	sp!, {r4, r5, r6, r7}
	ldrd	r4, r5, [r0]			@ load old val
	ldrd	r6, r7, [r1]			@ load new val
	smp_dmb	arm
1:	ldrexd	r0, r1, [r2]			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
	strexdeq r3, r6, r7, [r2]		@ store newval if eq
	teqeq	r3, #1				@ success?
	beq	1b				@ if no then retry
	smp_dmb	arm
	rsbs	r0, r3, #0			@ set returned val and C flag
	ldmfd	sp!, {r4, r5, r6, r7}
	usr_ret	lr

#elif !defined(CONFIG_SMP)

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg64
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle of
	 * the critical sequence.  The same strategy as for cmpxchg is used.
	 */
	stmfd	sp!, {r4, r5, r6, lr}
	ldmia	r0, {r4, r5}			@ load old val
	ldmia	r1, {r6, lr}			@ load new val
1:	ldmia	r2, {r0, r1}			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
2:	stmeqia	r2, {r6, lr}			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	ldmfd	sp!, {r4, r5, r6, pc}

	.text
kuser_cmpxchg64_fixup:
	@ Called from kuser_cmpxchg_fixup.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
#if __LINUX_ARM_ARCH__ < 6
	bcc	kuser_cmpxchg32_fixup
#endif
	ret	lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else
#error "incoherent kernel configuration"
#endif
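/*
 * Sketch of the documented contract (see kernel_user_helpers.txt):
 *
 *   typedef int (__kuser_cmpxchg64_t)(const int64_t *oldval,
 *                                     const int64_t *newval,
 *                                     volatile int64_t *ptr);
 *   #define __kuser_cmpxchg64 (*(__kuser_cmpxchg64_t *)0xffff0f60)
 *
 * It returns 0 (with the C flag set) if *ptr matched *oldval and was
 * atomically replaced with *newval, and non-zero otherwise.
 */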

	kuser_pad __kuser_cmpxchg64, 64

__kuser_memory_barrier:				@ 0xffff0fa0
	smp_dmb	arm
	usr_ret	lr

	kuser_pad __kuser_memory_barrier, 32

__kuser_cmpxchg:				@ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	ldr	r7, 1f			@ it's 20 bits
	swi	__ARM_NR_cmpxchg
	ldmfd	sp!, {r7, pc}
1:	.word	__ARM_NR_cmpxchg

#elif __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg_fixup).
	 */
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	usr_ret	lr

	.text
kuser_cmpxchg32_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
	ret	lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else

	smp_dmb	arm
1:	ldrex	r3, [r2]
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
	teqeq	r3, #1
	beq	1b
	rsbs	r0, r3, #0
	/* beware -- each __kuser slot must be 8 instructions max */
	ALT_SMP(b	__kuser_memory_barrier)
	ALT_UP(usr_ret	lr)

#endif
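/*
 * Illustrative use from user space, per kernel_user_helpers.txt:
 *
 *   typedef int (__kuser_cmpxchg_t)(int oldval, int newval,
 *                                   volatile int *ptr);
 *   #define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)
 *
 * Zero is returned (C flag set) on a successful swap, non-zero if
 * *ptr did not match oldval.
 */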

	kuser_pad __kuser_cmpxchg, 32

__kuser_get_tls:				@ 0xffff0fe0
	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
	usr_ret	lr
	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
	kuser_pad __kuser_get_tls, 16
	.rep	3
	.word	0			@ 0xffff0ff0 software TLS value, then
	.endr				@ pad up to __kuser_helper_version
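/*
 * __kuser_get_tls returns the TLS value in r0.  On CPUs with the cp15
 * TLS register, kuser_get_tls_init() in traps.c copies the mrc at
 * 0xffff0fe8 over the ldr above; otherwise the ldr picks up the word
 * the kernel maintains at 0xffff0ff0 on each context switch.
 */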

__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:

#endif

 THUMB(	.thumb	)

/*
 * Vector stubs.
 *
 * This code is copied to 0xffff1000 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not exceed
 * a page size.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
	.macro	vector_stub, name, mode, correction=0
	.align	5

vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	@ (parent CPSR)
	@
	stmia	sp, {r0, lr}		@ save r0, lr
	mrs	lr, spsr
	str	lr, [sp, #8]		@ save spsr

	@
	@ Prepare for SVC32 mode.  IRQs remain disabled.
	@
	mrs	r0, cpsr
	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
	msr	spsr_cxsf, r0

	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f
 THUMB(	adr	r0, 1f			)
 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
	mov	r0, sp
 ARM(	ldr	lr, [pc, lr, lsl #2]	)
	movs	pc, lr			@ branch to handler in SVC mode
ENDPROC(vector_\name)

	.align	2
	@ handler addresses follow this label
1:
	.endm
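/*
 * Stub mechanics in brief: sp points at a small per-mode scratch area;
 * the stub stashes r0, lr_<exception> and spsr_<exception> there,
 * primes spsr with SVC mode, and the final "movs pc, lr" both branches
 * into the table below and performs the mode switch by copying spsr
 * into cpsr.
 */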

	.section .stubs, "ax", %progbits
__stubs_start:
	@ This must be the first word
	.word	vector_swi

vector_rst:
 ARM(	swi	SYS_ERROR0	)
 THUMB(	svc	#0		)
 THUMB(	nop			)
	b	vector_und

/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0 (USR_26 / USR_32)
	.long	__pabt_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3 (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0 (USR_26 / USR_32)
	.long	__und_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3 (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f

	.align	5

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
	b	vector_addrexcptn

/*=============================================================================
 * FIQ "NMI" handler
 *-----------------------------------------------------------------------------
 * Handle a FIQ using the SVC stack, allowing FIQ to act like NMI on
 * x86 systems.
 */
	vector_stub	fiq, FIQ_MODE, 4

	.long	__fiq_usr			@  0  (USR_26 / USR_32)
	.long	__fiq_svc			@  1  (FIQ_26 / FIQ_32)
	.long	__fiq_svc			@  2  (IRQ_26 / IRQ_32)
	.long	__fiq_svc			@  3  (SVC_26 / SVC_32)
	.long	__fiq_svc			@  4
	.long	__fiq_svc			@  5
	.long	__fiq_svc			@  6
	.long	__fiq_abt			@  7
	.long	__fiq_svc			@  8
	.long	__fiq_svc			@  9
	.long	__fiq_svc			@  a
	.long	__fiq_svc			@  b
	.long	__fiq_svc			@  c
	.long	__fiq_svc			@  d
	.long	__fiq_svc			@  e
	.long	__fiq_svc			@  f

	.globl	vector_fiq_offset
	.equ	vector_fiq_offset, vector_fiq

	.section .vectors, "ax", %progbits
__vectors_start:
	W(b)	vector_rst
	W(b)	vector_und
	W(ldr)	pc, __vectors_start + 0x1000
	W(b)	vector_pabt
	W(b)	vector_dabt
	W(b)	vector_addrexcptn
	W(b)	vector_irq
	W(b)	vector_fiq
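/*
 * The vectors page sits at 0xffff0000 (or 0 without high vectors) and
 * the stubs one page above it, so plain branches reach them.  The SWI
 * entry instead loads pc from the word planted at the very start of
 * the stubs section, since vector_swi in .text is out of branch range
 * from the relocated page.
 */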

	.data

	.globl	cr_alignment
cr_alignment:
	.space	4

#ifdef CONFIG_MULTI_IRQ_HANDLER
	.globl	handle_arch_irq
handle_arch_irq:
	.space	4
#endif