/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 *  Note:  there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 *  that causes it to save wrong values...  Be aware!
 */

#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#ifndef CONFIG_MULTI_IRQ_HANDLER
#include <mach/entry-macro.S>
#endif
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>
#include <asm/system_info.h>

#include "entry-header.S"
#include <asm/entry-macro-multi.S>

/*
 * Interrupt handling.
 */
	.macro	irq_handler
#ifdef CONFIG_MULTI_IRQ_HANDLER
	ldr	r1, =handle_arch_irq
	mov	r0, sp
	adr	lr, BSYM(9997f)
	ldr	pc, [r1]
#else
	arch_irq_handler_default
#endif
9997:
	.endm
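
	@ NOTE: with CONFIG_MULTI_IRQ_HANDLER the machine code installs its
	@ top-level handler in the handle_arch_irq pointer (defined in .data
	@ at the end of this file); it is invoked with r0 = pt_regs and lr
	@ pre-set so that it returns to the 9997 label above.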

	.macro	pabt_helper
	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
#ifdef MULTI_PABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_PABT_FUNC]
#else
	bl	CPU_PABORT_HANDLER
#endif
	.endm

	.macro	dabt_helper

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - pt_regs
	@  r4 - aborted context pc
	@  r5 - aborted context psr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
#ifdef MULTI_DABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_DABT_FUNC]
#else
	bl	CPU_DABORT_HANDLER
#endif
	.endm
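
	@ NOTE: the "mov lr, pc; ldr pc, [ip, #...]" pairs above implement
	@ an indirect call without blx, keeping the code usable on
	@ pre-ARMv5 CPUs; lr ends up addressing the instruction after the
	@ ldr because the ARM pc reads two instructions ahead.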

#ifdef CONFIG_KPROBES
	.section	.kprobes.text,"ax",%progbits
#else
	.text
#endif

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - lr}		)
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
 THUMB(	str	lr, [sp, #S_LR]		)
	mov	r1, #\reason
	.endm

__pabt_invalid:
	inv_entry BAD_PREFETCH
	b	common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
	inv_entry BAD_DATA
	b	common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
	inv_entry BAD_IRQ
	b	common_invalid
ENDPROC(__irq_invalid)

__und_invalid:
	inv_entry BAD_UNDEFINSTR

	@
	@ XXX fall through to common_invalid
	@

@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
common_invalid:
	zero_fp

	ldmia	r0, {r4 - r6}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"

	mov	r0, sp
	b	bad_mode
ENDPROC(__und_invalid)

/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif

	.macro	svc_entry, stack_hole=0
 UNWIND(.fnstart		)
 UNWIND(.save {r0 - pc}		)
	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 SPFIX(	mov	r0, sp		)
 SPFIX(	tst	r0, #4		)	@ test original stack alignment
 SPFIX(	ldr	r0, [sp]	)	@ restored
#else
 SPFIX(	tst	sp, #4		)
#endif
 SPFIX(	subeq	sp, sp, #4	)
	stmia	sp, {r1 - r12}

	ldmia	r0, {r3 - r5}
	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""      ""       ""
	add	r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
 SPFIX(	addeq	r2, r2, #4	)
	str	r3, [sp, #-4]!		@ save the "real" r0 copied
					@ from the exception stack

	mov	r3, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - sp_svc
	@  r3 - lr_svc
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stmia	r7, {r2 - r6}

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	.endm
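
	@ NOTE: SPFIX() code is only assembled on EABI kernels (v5+), where
	@ the ABI requires an 8-byte aligned stack: it tests bit 2 of the
	@ stack pointer and inserts a 4-byte pad when needed so the saved
	@ pt_regs frame ends up 8-byte aligned, with the addeq above fixing
	@ up the recorded sp_svc to match.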

	.align	5
__dabt_svc:
	svc_entry
	mov	r2, sp
	dabt_helper
 THUMB(	ldr	r5, [sp, #S_PSR]	)	@ potentially updated CPSR
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__dabt_svc)

	.align	5
__irq_svc:
	svc_entry
	irq_handler

#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	teq	r8, #0				@ if preempt count != 0
	movne	r0, #0				@ force flags to 0
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
#endif

	svc_exit r5, irq = 1			@ return from exception
 UNWIND(.fnend		)
ENDPROC(__irq_svc)

	.ltorg

#ifdef CONFIG_PREEMPT
svc_preempt:
	mov	r8, lr
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new task's TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	moveq	pc, r8				@ go again
	b	1b
#endif

__und_fault:
	@ Correct the PC such that it is pointing at the instruction
	@ which caused the fault.  If the faulting instruction was ARM,
	@ the PC will be pointing at the next instruction, so we have to
	@ subtract 4.  Otherwise, it is Thumb, and the PC will be
	@ pointing at the second half of the Thumb instruction, so we
	@ have to subtract 2.
	ldr	r2, [r0, #S_PC]
	sub	r2, r2, r1
	str	r2, [r0, #S_PC]
	b	do_undefinstr
ENDPROC(__und_fault)

	.align	5
__und_svc:
#ifdef CONFIG_KPROBES
	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
	@ it obviously needs free stack space which then will belong to
	@ the saved context.
	svc_entry 64
#else
	svc_entry
#endif
	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	@  r0 - instruction
	@
#ifndef CONFIG_THUMB2_KERNEL
	ldr	r0, [r4, #-4]
#else
	mov	r1, #2
	ldrh	r0, [r4, #-2]			@ Thumb instruction at LR - 2
	cmp	r0, #0xe800			@ 32-bit instruction if xx >= 0
	blo	__und_svc_fault
	ldrh	r9, [r4]			@ bottom 16 bits
	add	r4, r4, #2
	str	r4, [sp, #S_PC]
	orr	r0, r9, r0, lsl #16
#endif
	adr	r9, BSYM(__und_svc_finish)
	mov	r2, r4
	bl	call_fpe

	mov	r1, #4				@ PC correction to apply
__und_svc_fault:
	mov	r0, sp				@ struct pt_regs *regs
	bl	__und_fault

__und_svc_finish:
	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__und_svc)

	.align	5
__pabt_svc:
	svc_entry
	mov	r2, sp				@ regs
	pabt_helper
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__pabt_svc)

	.align	5
.LCcralign:
	.word	cr_alignment
#ifdef MULTI_DABORT
.LCprocfns:
	.word	processor
#endif
.LCfp:
	.word	fp_enter

/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so should S_FRAME_SIZE
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

	.macro	usr_entry
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)	@ don't unwind the user space
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)

	ldmia	r0, {r3 - r5}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""     ""        ""

	str	r3, [sp]		@ save the "real" r0 copied
					@ from the exception stack

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
	stmia	r0, {r4 - r6}
 ARM(	stmdb	r0, {sp, lr}^			)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)

	@
	@ Enable the alignment trap while in kernel mode
	@
	alignment_trap r0

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp

#ifdef CONFIG_IRQSOFF_TRACER
	bl	trace_hardirqs_off
#endif
	ct_user_exit save = 0
	.endm

	.macro	kuser_cmpxchg_check
#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
    !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
	@ Make sure our user space atomic helper is restarted
	@ if it was interrupted in a critical region.  Here we
	@ perform a quick test inline since it should be false
	@ 99.9999% of the time.  The rest is done out of line.
	cmp	r4, #TASK_SIZE
	blhs	kuser_cmpxchg64_fixup
#endif
#endif
	.endm

	.align	5
__dabt_usr:
	usr_entry
	kuser_cmpxchg_check
	mov	r2, sp
	dabt_helper
	b	ret_from_exception
 UNWIND(.fnend		)
ENDPROC(__dabt_usr)

	.align	5
__irq_usr:
	usr_entry
	kuser_cmpxchg_check
	irq_handler
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user_from_irq
 UNWIND(.fnend		)
ENDPROC(__irq_usr)

	.ltorg

	.align	5
__und_usr:
	usr_entry

	mov	r2, r4
	mov	r3, r5

	@ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
	@      faulting instruction depending on Thumb mode.
	@ r3 = regs->ARM_cpsr
	@
	@ The emulation code returns using r9 if it has emulated the
	@ instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	adr	r9, BSYM(ret_from_exception)

	tst	r3, #PSR_T_BIT			@ Thumb mode?
	bne	__und_usr_thumb
	sub	r4, r2, #4			@ ARM instr at LR - 4
1:	ldrt	r0, [r4]
 ARM_BE8(rev	r0, r0)				@ little endian instruction

	@ r0 = 32-bit ARM instruction which caused the exception
	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
	@ r4 = PC value for the faulting instruction
	@ lr = 32-bit undefined instruction function
	adr	lr, BSYM(__und_usr_fault_32)
	b	call_fpe

__und_usr_thumb:
	@ Thumb instruction
	sub	r4, r2, #2			@ First half of thumb instr at LR - 2
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
/*
 * Thumb-2 instruction handling.  Note that because pre-v6 and >= v6 platforms
 * can never be supported in a single kernel, this code is not applicable at
 * all when __LINUX_ARM_ARCH__ < 6.  This allows simplifying assumptions to be
 * made about .arch directives.
 */
#if __LINUX_ARM_ARCH__ < 7
/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
#define NEED_CPU_ARCHITECTURE
	ldr	r5, .LCcpu_architecture
	ldr	r5, [r5]
	cmp	r5, #CPU_ARCH_ARMv7
	blo	__und_usr_fault_16		@ 16bit undefined instruction
/*
 * The following code won't get run unless the running CPU really is v7, so
 * coding round the lack of ldrht on older arches is pointless.  Temporarily
 * override the assembler target arch with the minimum required instead:
 */
	.arch	armv6t2
#endif
2:	ldrht	r5, [r4]
	cmp	r5, #0xe800			@ 32bit instruction if xx != 0
	blo	__und_usr_fault_16		@ 16bit undefined instruction
3:	ldrht	r0, [r2]
	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
	str	r2, [sp, #S_PC]			@ it's a 2x16bit instr, update
	orr	r0, r0, r5, lsl #16
	adr	lr, BSYM(__und_usr_fault_32)
	@ r0 = the two 16-bit Thumb instructions which caused the exception
	@ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
	@ r4 = PC value for the first 16-bit Thumb instruction
	@ lr = 32bit undefined instruction function

#if __LINUX_ARM_ARCH__ < 7
/* If the target arch was overridden, change it back: */
#ifdef CONFIG_CPU_32v6K
	.arch	armv6k
#else
	.arch	armv6
#endif
#endif /* __LINUX_ARM_ARCH__ < 7 */
#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
	b	__und_usr_fault_16
#endif
 UNWIND(.fnend)
ENDPROC(__und_usr)

/*
 * The out of line fixup for the ldrt instructions above.
 */
	.pushsection .fixup, "ax"
	.align	2
4:	mov	pc, r9
	.popsection
	.pushsection __ex_table,"a"
	.long	1b, 4b
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
	.long	2b, 4b
	.long	3b, 4b
#endif
	.popsection
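
	@ NOTE: each __ex_table entry pairs a potentially faulting
	@ instruction (the ldrt/ldrht at labels 1, 2 and 3 above) with its
	@ fixup at label 4; if the load of the user instruction faults,
	@ execution resumes at 4b, which jumps to the address in r9
	@ (ret_from_exception, set up in __und_usr).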

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * NEON is a special case that has to be handled here. Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them. Plus, there are
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
 *  r2  = PC value to resume execution after successful emulation
 *  r9  = normal "successful" return address
 *  r10 = this thread's thread_info structure
 *  lr  = unrecognised instruction return address
 * IRQs disabled, FIQs enabled.
 */
	@
	@ Fall-through from Thumb-2 __und_usr
	@
#ifdef CONFIG_NEON
	get_thread_info r10			@ get current thread
	adr	r6, .LCneon_thumb_opcodes
	b	2f
#endif
call_fpe:
	get_thread_info r10			@ get current thread
#ifdef CONFIG_NEON
	adr	r6, .LCneon_arm_opcodes
2:	ldr	r5, [r6], #4			@ mask value
	ldr	r7, [r6], #4			@ opcode bits matching in mask
	cmp	r5, #0				@ end mask?
	beq	1f
	and	r8, r0, r5
	cmp	r8, r7				@ NEON instruction?
	bne	2b
	mov	r7, #1
	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
	b	do_vfp				@ let VFP handler handle this
1:
#endif
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
	moveq	pc, lr
	and	r8, r0, #0x00000f00		@ mask out CP number
 THUMB(	lsr	r8, r8, #8		)
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
 ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
 THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
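	@ NOTE: r8 holds the CP number shifted left by 8 (insn & 0x0f00);
	@ "add pc, pc, r8, lsr #6" scales that to a word offset into the
	@ branch table below.  Because the ARM pc reads two instructions
	@ ahead, the nop pads the gap so that CP#0 lands on the first entry.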
 ARM(	add	pc, pc, r8, lsr #6	)
 THUMB(	lsl	r8, r8, #2		)
 THUMB(	add	pc, r8			)
	nop

	movw_pc	lr				@ CP#0
	W(b)	do_fpe				@ CP#1 (FPE)
	W(b)	do_fpe				@ CP#2 (FPE)
	movw_pc	lr				@ CP#3
#ifdef CONFIG_CRUNCH
	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
#else
	movw_pc	lr				@ CP#4
	movw_pc	lr				@ CP#5
	movw_pc	lr				@ CP#6
#endif
	movw_pc	lr				@ CP#7
	movw_pc	lr				@ CP#8
	movw_pc	lr				@ CP#9
#ifdef CONFIG_VFP
	W(b)	do_vfp				@ CP#10 (VFP)
	W(b)	do_vfp				@ CP#11 (VFP)
#else
	movw_pc	lr				@ CP#10 (VFP)
	movw_pc	lr				@ CP#11 (VFP)
#endif
	movw_pc	lr				@ CP#12
	movw_pc	lr				@ CP#13
	movw_pc	lr				@ CP#14 (Debug)
	movw_pc	lr				@ CP#15 (Control)

#ifdef NEED_CPU_ARCHITECTURE
	.align	2
.LCcpu_architecture:
	.word	__cpu_architecture
#endif

#ifdef CONFIG_NEON
	.align	6

.LCneon_arm_opcodes:
	.word	0xfe000000			@ mask
	.word	0xf2000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf4000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode

.LCneon_thumb_opcodes:
	.word	0xef000000			@ mask
	.word	0xef000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf9000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode
#endif

do_fpe:
	enable_irq
	ldr	r4, .LCfp
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

	.pushsection .data
ENTRY(fp_enter)
	.word	no_fp
	.popsection

ENTRY(no_fp)
	mov	pc, lr
ENDPROC(no_fp)

__und_usr_fault_32:
	mov	r1, #4
	b	1f
__und_usr_fault_16:
	mov	r1, #2
1:	enable_irq
	mov	r0, sp
	adr	lr, BSYM(ret_from_exception)
	b	__und_fault
ENDPROC(__und_usr_fault_32)
ENDPROC(__und_usr_fault_16)

	.align	5
__pabt_usr:
	usr_entry
	mov	r2, sp				@ regs
	pabt_helper
 UNWIND(.fnend		)
	/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
 UNWIND(.fnend		)
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	add	ip, r1, #TI_CPU_SAVE
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
	ldr	r4, [r2, #TI_TP_VALUE]
	ldr	r5, [r2, #TI_TP_VALUE + 4]
#ifdef CONFIG_CPU_USE_DOMAINS
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
	switch_tls r1, r4, r5, r3, r7
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	ldr	r7, [r2, #TI_TASK]
	ldr	r8, =__stack_chk_guard
	ldr	r7, [r7, #TSK_STACK_CANARY]
#endif
#ifdef CONFIG_CPU_USE_DOMAINS
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	mov	r5, r0
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	str	r7, [r8]
#endif
 THUMB(	mov	ip, r4			   )
	mov	r0, r5
 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 THUMB(	ldr	sp, [ip], #4		   )
 THUMB(	ldr	pc, [ip]		   )
 UNWIND(.fnend		)
ENDPROC(__switch_to)
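
	@ NOTE: with CONFIG_CC_STACKPROTECTOR on !SMP kernels the canary
	@ lives in the single global __stack_chk_guard, so the next task's
	@ canary is loaded above before the notifier call and stored back
	@ into the global just before the new task's registers are reloaded.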

	__INIT

/*
 * User helpers.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
 */
 THUMB(	.arm	)

	.macro	usr_ret, reg
#ifdef CONFIG_ARM_THUMB
	bx	\reg
#else
	mov	pc, \reg
#endif
	.endm

	.macro	kuser_pad, sym, size
	.if	(. - \sym) & 3
	.rept	4 - (. - \sym) & 3
	.byte	0			@ zero-pad to the next word boundary
	.endr
	.endif
	.rept	(\size - (. - \sym)) / 4
	.word	0xe7fddef1		@ fill with an undefined instruction
	.endr
	.endm

#ifdef CONFIG_KUSER_HELPERS
	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:

/*
 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
 */

__kuser_cmpxchg64:				@ 0xffff0f60

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	ldr	r7, 1f			@ it's 20 bits
	swi	__ARM_NR_cmpxchg64
	ldmfd	sp!, {r7, pc}
1:	.word	__ARM_NR_cmpxchg64

#elif defined(CONFIG_CPU_32v6K)

	stmfd	sp!, {r4, r5, r6, r7}
	ldrd	r4, r5, [r0]			@ load old val
	ldrd	r6, r7, [r1]			@ load new val
	smp_dmb	arm
1:	ldrexd	r0, r1, [r2]			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
	strexdeq r3, r6, r7, [r2]		@ store newval if eq
	teqeq	r3, #1				@ success?
	beq	1b				@ if no then retry
	smp_dmb	arm
	rsbs	r0, r3, #0			@ set returned val and C flag
	ldmfd	sp!, {r4, r5, r6, r7}
	usr_ret	lr

#elif !defined(CONFIG_SMP)

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg64
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle of
	 * the critical sequence.  The same strategy as for cmpxchg is used.
	 */
	stmfd	sp!, {r4, r5, r6, lr}
	ldmia	r0, {r4, r5}			@ load old val
	ldmia	r1, {r6, lr}			@ load new val
1:	ldmia	r2, {r0, r1}			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
2:	stmeqia	r2, {r6, lr}			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	ldmfd	sp!, {r4, r5, r6, pc}

	.text
kuser_cmpxchg64_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
#if __LINUX_ARM_ARCH__ < 6
	bcc	kuser_cmpxchg32_fixup
#endif
	mov	pc, lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else
#error "incoherent kernel configuration"
#endif

	kuser_pad __kuser_cmpxchg64, 64
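
	@ NOTE: all cmpxchg helpers share one return convention: r0 == 0
	@ with the C flag set on success, r0 != 0 with C clear on failure;
	@ "rsbs r0, r3, #0" produces both results in a single instruction.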

__kuser_memory_barrier:				@ 0xffff0fa0
	smp_dmb	arm
	usr_ret	lr

	kuser_pad __kuser_memory_barrier, 32

__kuser_cmpxchg:				@ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	ldr	r7, 1f			@ it's 20 bits
	swi	__ARM_NR_cmpxchg
	ldmfd	sp!, {r7, pc}
1:	.word	__ARM_NR_cmpxchg

#elif __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg32_fixup).
	 */
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	usr_ret	lr

	.text
kuser_cmpxchg32_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
	mov	pc, lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else

914
	smp_dmb	arm
915
1:	ldrex	r3, [r2]
916 917
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
918 919
	teqeq	r3, #1
	beq	1b
920
	rsbs	r0, r3, #0
921
	/* beware -- each __kuser slot must be 8 instructions max */
922 923
	ALT_SMP(b	__kuser_memory_barrier)
	ALT_UP(usr_ret	lr)
924 925 926

#endif

927
	kuser_pad __kuser_cmpxchg, 32
928 929

__kuser_get_tls:				@ 0xffff0fe0
930
	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
931
	usr_ret	lr
932
	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
933 934
	kuser_pad __kuser_get_tls, 16
	.rep	3
935 936
	.word	0			@ 0xffff0ff0 software TLS value, then
	.endr				@ pad up to __kuser_helper_version
937 938 939 940 941 942 943

__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:


#endif

 THUMB(	.thumb	)

/*
 * Vector stubs.
 *
 * This code is copied to 0xffff1000 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not exceed
 * a page size.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
	.macro	vector_stub, name, mode, correction=0
	.align	5

vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	@ (parent CPSR)
	@
	stmia	sp, {r0, lr}		@ save r0, lr
	mrs	lr, spsr
	str	lr, [sp, #8]		@ save spsr

	@
	@ Prepare for SVC32 mode.  IRQs remain disabled.
	@
	mrs	r0, cpsr
	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
	msr	spsr_cxsf, r0

	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f
 THUMB(	adr	r0, 1f			)
 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
	mov	r0, sp
 ARM(	ldr	lr, [pc, lr, lsl #2]	)
	movs	pc, lr			@ branch to handler in SVC mode
ENDPROC(vector_\name)

	.align	2
	@ handler addresses follow this label
1:
	.endm
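
	@ NOTE: "and lr, lr, #0x0f" above extracts the mode field of the
	@ interrupted context's SPSR and uses it to index the 16-entry
	@ table that each stub places right after the macro body, so USR
	@ and SVC reach real handlers while every other mode falls into
	@ the matching __*_invalid entry.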

	.section .stubs, "ax", %progbits
__stubs_start:
	@ This must be the first word
	.word	vector_swi

vector_rst:
 ARM(	swi	SYS_ERROR0	)
 THUMB(	svc	#0		)
 THUMB(	nop			)
	b	vector_und

/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0 (USR_26 / USR_32)
	.long	__pabt_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3 (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0 (USR_26 / USR_32)
	.long	__und_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3 (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f

	.align	5

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
	b	vector_addrexcptn

/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
vector_fiq:
	subs	pc, lr, #4

	.globl	vector_fiq_offset
	.equ	vector_fiq_offset, vector_fiq

	.section .vectors, "ax", %progbits
__vectors_start:
	W(b)	vector_rst
	W(b)	vector_und
	W(ldr)	pc, __vectors_start + 0x1000
	W(b)	vector_pabt
	W(b)	vector_dabt
	W(b)	vector_addrexcptn
	W(b)	vector_irq
	W(b)	vector_fiq
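
	@ NOTE: the SWI entry cannot be a plain branch because vector_swi
	@ is far outside the 24-bit branch range of the vector page;
	@ instead the pc is loaded from the first word of the .stubs
	@ section, mapped at __vectors_start + 0x1000, which holds the
	@ address of vector_swi ("This must be the first word" above).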

	.data

	.globl	cr_alignment
	.globl	cr_no_alignment
cr_alignment:
	.space	4
cr_no_alignment:
	.space	4

#ifdef CONFIG_MULTI_IRQ_HANDLER
	.globl	handle_arch_irq
handle_arch_irq:
	.space	4
#endif