/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 *  Note:  there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 *  that causes it to save wrong values...  Be aware!
 */

#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#ifndef CONFIG_MULTI_IRQ_HANDLER
#include <mach/entry-macro.S>
#endif
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>
#include <asm/system_info.h>

#include "entry-header.S"
#include <asm/entry-macro-multi.S>

/*
 * Interrupt handling.
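 *
 * The irq_handler macro below dispatches to the platform's interrupt
 * handler: with CONFIG_MULTI_IRQ_HANDLER it jumps through the
 * handle_arch_irq function pointer (set up during boot) with r0 =
 * pt_regs and lr aimed at the local label 9997; otherwise the default
 * handler from <mach/entry-macro.S> is expanded inline.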
 */
	.macro	irq_handler
#ifdef CONFIG_MULTI_IRQ_HANDLER
	ldr	r1, =handle_arch_irq
	mov	r0, sp
	adr	lr, BSYM(9997f)
	ldr	pc, [r1]
#else
	arch_irq_handler_default
#endif
9997:
	.endm

	.macro	pabt_helper
	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
#ifdef MULTI_PABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_PABT_FUNC]
#else
	bl	CPU_PABORT_HANDLER
#endif
	.endm

	.macro	dabt_helper

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - pt_regs
	@  r4 - aborted context pc
	@  r5 - aborted context psr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
#ifdef MULTI_DABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_DABT_FUNC]
#else
	bl	CPU_DABORT_HANDLER
#endif
	.endm

#ifdef CONFIG_KPROBES
	.section	.kprobes.text,"ax",%progbits
#else
	.text
#endif

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - lr}		)
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
 THUMB(	str	lr, [sp, #S_LR]		)
	mov	r1, #\reason
	.endm

__pabt_invalid:
	inv_entry BAD_PREFETCH
	b	common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
	inv_entry BAD_DATA
	b	common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
	inv_entry BAD_IRQ
	b	common_invalid
ENDPROC(__irq_invalid)

__und_invalid:
	inv_entry BAD_UNDEFINSTR

	@
	@ XXX fall through to common_invalid
	@

@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
common_invalid:
	zero_fp

	ldmia	r0, {r4 - r6}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"

	mov	r0, sp
	b	bad_mode
ENDPROC(__und_invalid)

/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif
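
/*
 * Reading aid for the SPFIX() instructions in svc_entry below: on EABI
 * kernels the saved frame must leave sp 64-bit aligned.  svc_entry
 * first drops sp by S_FRAME_SIZE + \stack_hole - 4, tests the resulting
 * alignment, and conditionally inserts one pad word so that the final
 * pre-decremented store of r0 brings sp back to a multiple of 8.  r2
 * gets the same conditional correction so that the saved sp_svc is the
 * true pre-exception stack pointer.
 */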

	.macro	svc_entry, stack_hole=0
 UNWIND(.fnstart		)
 UNWIND(.save {r0 - pc}		)
	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 SPFIX(	mov	r0, sp		)
 SPFIX(	tst	r0, #4		)	@ test original stack alignment
 SPFIX(	ldr	r0, [sp]	)	@ restored
#else
 SPFIX(	tst	sp, #4		)
#endif
 SPFIX(	subeq	sp, sp, #4	)
	stmia	sp, {r1 - r12}

	ldmia	r0, {r3 - r5}
	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""      ""       ""
	add	r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
 SPFIX(	addeq	r2, r2, #4	)
	str	r3, [sp, #-4]!		@ save the "real" r0 copied
					@ from the exception stack

	mov	r3, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - sp_svc
	@  r3 - lr_svc
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stmia	r7, {r2 - r6}

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	.endm

	.align	5
__dabt_svc:
	svc_entry
	mov	r2, sp
	dabt_helper
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__dabt_svc)

	.align	5
__irq_svc:
	svc_entry
	irq_handler

#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	teq	r8, #0				@ if preempt count != 0
	movne	r0, #0				@ force flags to 0
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
#endif

	svc_exit r5, irq = 1			@ return from exception
 UNWIND(.fnend		)
ENDPROC(__irq_svc)

	.ltorg

#ifdef CONFIG_PREEMPT
svc_preempt:
	mov	r8, lr
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new task's TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	moveq	pc, r8				@ go again
	b	1b
#endif

__und_fault:
	@ Correct the PC such that it is pointing at the instruction
	@ which caused the fault.  If the faulting instruction was ARM,
	@ the PC will be pointing at the next instruction and we have to
	@ subtract 4.  Otherwise, it is Thumb, and the PC will be
	@ pointing at the second half of the Thumb instruction, so we
	@ have to subtract 2.
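	@ (For example, an undefined 32-bit ARM instruction at 0x8000
	@ traps with regs->ARM_pc == 0x8004; the caller passes r1 == 4,
	@ so the code below rewinds the saved PC to 0x8000.)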
	ldr	r2, [r0, #S_PC]
	sub	r2, r2, r1
	str	r2, [r0, #S_PC]
	b	do_undefinstr
ENDPROC(__und_fault)

	.align	5
__und_svc:
#ifdef CONFIG_KPROBES
	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
	@ it obviously needs free stack space which then will belong to
	@ the saved context.
	svc_entry 64
#else
	svc_entry
#endif
	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	@  r0 - instruction
	@
#ifndef CONFIG_THUMB2_KERNEL
	ldr	r0, [r4, #-4]
#else
	mov	r1, #2
	ldrh	r0, [r4, #-2]			@ Thumb instruction at LR - 2
	cmp	r0, #0xe800			@ 32-bit instruction if xx >= 0
	blo	__und_svc_fault
	ldrh	r9, [r4]			@ bottom 16 bits
	add	r4, r4, #2
	str	r4, [sp, #S_PC]
	orr	r0, r9, r0, lsl #16
#endif
	adr	r9, BSYM(__und_svc_finish)
	mov	r2, r4
	bl	call_fpe

	mov	r1, #4				@ PC correction to apply
__und_svc_fault:
	mov	r0, sp				@ struct pt_regs *regs
	bl	__und_fault

__und_svc_finish:
	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__und_svc)

	.align	5
__pabt_svc:
	svc_entry
	mov	r2, sp				@ regs
	pabt_helper
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__pabt_svc)

	.align	5
.LCcralign:
	.word	cr_alignment
#ifdef MULTI_DABORT
.LCprocfns:
	.word	processor
#endif
.LCfp:
	.word	fp_enter

/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so should S_FRAME_SIZE
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

	.macro	usr_entry
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)	@ don't unwind the user space
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)

	ldmia	r0, {r3 - r5}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""     ""        ""

	str	r3, [sp]		@ save the "real" r0 copied
					@ from the exception stack

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
	stmia	r0, {r4 - r6}
 ARM(	stmdb	r0, {sp, lr}^			)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)

	@
	@ Enable the alignment trap while in kernel mode
	@
	alignment_trap r0

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp

#ifdef CONFIG_IRQSOFF_TRACER
	bl	trace_hardirqs_off
#endif
	ct_user_exit save = 0
	.endm

	.macro	kuser_cmpxchg_check
#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
	@ Make sure our user space atomic helper is restarted
	@ if it was interrupted in a critical region.  Here we
	@ perform a quick test inline since it should be false
	@ 99.9999% of the time.  The rest is done out of line.
	cmp	r4, #TASK_SIZE
	blhs	kuser_cmpxchg64_fixup
#endif
#endif
	.endm

	.align	5
__dabt_usr:
	usr_entry
	kuser_cmpxchg_check
	mov	r2, sp
	dabt_helper
	b	ret_from_exception
 UNWIND(.fnend		)
ENDPROC(__dabt_usr)

	.align	5
__irq_usr:
	usr_entry
	kuser_cmpxchg_check
	irq_handler
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user_from_irq
 UNWIND(.fnend		)
ENDPROC(__irq_usr)

	.ltorg

	.align	5
__und_usr:
	usr_entry

	mov	r2, r4
	mov	r3, r5

	@ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
	@      faulting instruction depending on Thumb mode.
	@ r3 = regs->ARM_cpsr
	@
	@ The emulation code returns using r9 if it has emulated the
	@ instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	adr	r9, BSYM(ret_from_exception)

	tst	r3, #PSR_T_BIT			@ Thumb mode?
	bne	__und_usr_thumb
	sub	r4, r2, #4			@ ARM instr at LR - 4
1:	ldrt	r0, [r4]
#ifdef CONFIG_CPU_ENDIAN_BE8
	rev	r0, r0				@ little endian instruction
#endif
	@ r0 = 32-bit ARM instruction which caused the exception
	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
	@ r4 = PC value for the faulting instruction
	@ lr = 32-bit undefined instruction function
	adr	lr, BSYM(__und_usr_fault_32)
	b	call_fpe

__und_usr_thumb:
	@ Thumb instruction
	sub	r4, r2, #2			@ First half of thumb instr at LR - 2
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
/*
 * Thumb-2 instruction handling.  Note that because pre-v6 and >= v6 platforms
 * can never be supported in a single kernel, this code is not applicable at
 * all when __LINUX_ARM_ARCH__ < 6.  This allows simplifying assumptions to be
 * made about .arch directives.
 */
#if __LINUX_ARM_ARCH__ < 7
/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
#define NEED_CPU_ARCHITECTURE
	ldr	r5, .LCcpu_architecture
	ldr	r5, [r5]
	cmp	r5, #CPU_ARCH_ARMv7
	blo	__und_usr_fault_16		@ 16bit undefined instruction
/*
 * The following code won't get run unless the running CPU really is v7, so
 * coding round the lack of ldrht on older arches is pointless.  Temporarily
 * override the assembler target arch with the minimum required instead:
 */
	.arch	armv6t2
#endif
2:	ldrht	r5, [r4]
	cmp	r5, #0xe800			@ 32bit instruction if xx != 0
	blo	__und_usr_fault_16		@ 16bit undefined instruction
3:	ldrht	r0, [r2]
	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
	str	r2, [sp, #S_PC]			@ it's a 2x16bit instr, update
	orr	r0, r0, r5, lsl #16
	adr	lr, BSYM(__und_usr_fault_32)
	@ r0 = the two 16-bit Thumb instructions which caused the exception
	@ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
	@ r4 = PC value for the first 16-bit Thumb instruction
	@ lr = 32bit undefined instruction function

#if __LINUX_ARM_ARCH__ < 7
/* If the target arch was overridden, change it back: */
#ifdef CONFIG_CPU_32v6K
	.arch	armv6k
#else
	.arch	armv6
#endif
#endif /* __LINUX_ARM_ARCH__ < 7 */
#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
	b	__und_usr_fault_16
#endif
 UNWIND(.fnend)
ENDPROC(__und_usr)

/*
 * The out of line fixup for the ldrt instructions above.
 */
	.pushsection .fixup, "ax"
	.align	2
4:	mov	pc, r9
	.popsection
	.pushsection __ex_table,"a"
	.long	1b, 4b
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
	.long	2b, 4b
	.long	3b, 4b
#endif
	.popsection

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * NEON is a special case that has to be handled here. Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them. Plus, there's
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
 *  r2  = PC value to resume execution after successful emulation
 *  r9  = normal "successful" return address
 *  r10 = this thread's thread_info structure
 *  lr  = unrecognised instruction return address
 * IRQs disabled, FIQs enabled.
 */
	@
	@ Fall-through from Thumb-2 __und_usr
	@
#ifdef CONFIG_NEON
	get_thread_info r10			@ get current thread
	adr	r6, .LCneon_thumb_opcodes
	b	2f
#endif
call_fpe:
	get_thread_info r10			@ get current thread
#ifdef CONFIG_NEON
	adr	r6, .LCneon_arm_opcodes
2:	ldr	r5, [r6], #4			@ mask value
	ldr	r7, [r6], #4			@ opcode bits matching in mask
	cmp	r5, #0				@ end mask?
	beq	1f
	and	r8, r0, r5
	cmp	r8, r7				@ NEON instruction?
	bne	2b
	mov	r7, #1
	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
	b	do_vfp				@ let VFP handler handle this
1:
#endif
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
	moveq	pc, lr
	and	r8, r0, #0x00000f00		@ mask out CP number
 THUMB(	lsr	r8, r8, #8		)
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
 ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
 THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
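	@ Dispatch on the coprocessor number: in the ARM build r8 holds
	@ (insn & 0x0f00), so "r8, lsr #6" is CP# * 4, a byte offset into
	@ the branch table below.  pc reads as '.' + 8 here, which thanks
	@ to the intervening nop is the address of the CP#0 entry.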
 ARM(	add	pc, pc, r8, lsr #6	)
 THUMB(	lsl	r8, r8, #2		)
 THUMB(	add	pc, r8			)
	nop

	movw_pc	lr				@ CP#0
	W(b)	do_fpe				@ CP#1 (FPE)
	W(b)	do_fpe				@ CP#2 (FPE)
	movw_pc	lr				@ CP#3
#ifdef CONFIG_CRUNCH
	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
#else
	movw_pc	lr				@ CP#4
	movw_pc	lr				@ CP#5
	movw_pc	lr				@ CP#6
#endif
	movw_pc	lr				@ CP#7
	movw_pc	lr				@ CP#8
	movw_pc	lr				@ CP#9
#ifdef CONFIG_VFP
	W(b)	do_vfp				@ CP#10 (VFP)
	W(b)	do_vfp				@ CP#11 (VFP)
#else
	movw_pc	lr				@ CP#10 (VFP)
	movw_pc	lr				@ CP#11 (VFP)
#endif
	movw_pc	lr				@ CP#12
	movw_pc	lr				@ CP#13
	movw_pc	lr				@ CP#14 (Debug)
	movw_pc	lr				@ CP#15 (Control)

#ifdef NEED_CPU_ARCHITECTURE
	.align	2
.LCcpu_architecture:
	.word	__cpu_architecture
#endif

#ifdef CONFIG_NEON
	.align	6

.LCneon_arm_opcodes:
	.word	0xfe000000			@ mask
	.word	0xf2000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf4000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode

.LCneon_thumb_opcodes:
	.word	0xef000000			@ mask
	.word	0xef000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf9000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode
#endif

do_fpe:
	enable_irq
	ldr	r4, .LCfp
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

	.pushsection .data
ENTRY(fp_enter)
	.word	no_fp
	.popsection

ENTRY(no_fp)
	mov	pc, lr
ENDPROC(no_fp)

__und_usr_fault_32:
	mov	r1, #4
	b	1f
__und_usr_fault_16:
	mov	r1, #2
1:	enable_irq
	mov	r0, sp
	adr	lr, BSYM(ret_from_exception)
	b	__und_fault
ENDPROC(__und_usr_fault_32)
ENDPROC(__und_usr_fault_16)

	.align	5
__pabt_usr:
	usr_entry
	mov	r2, sp				@ regs
	pabt_helper
 UNWIND(.fnend		)
	/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
 UNWIND(.fnend		)
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	add	ip, r1, #TI_CPU_SAVE
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
	ldr	r4, [r2, #TI_TP_VALUE]
	ldr	r5, [r2, #TI_TP_VALUE + 4]
#ifdef CONFIG_CPU_USE_DOMAINS
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
	switch_tls r1, r4, r5, r3, r7
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	ldr	r7, [r2, #TI_TASK]
	ldr	r8, =__stack_chk_guard
	ldr	r7, [r7, #TSK_STACK_CANARY]
#endif
#ifdef CONFIG_CPU_USE_DOMAINS
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	mov	r5, r0
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	str	r7, [r8]
#endif
 THUMB(	mov	ip, r4			   )
	mov	r0, r5
 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 THUMB(	ldr	sp, [ip], #4		   )
 THUMB(	ldr	pc, [ip]		   )
 UNWIND(.fnend		)
ENDPROC(__switch_to)

	__INIT

/*
 * User helpers.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
 */
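
/*
 * Usage sketch (illustrative only, not part of the kernel image):
 * userspace code might invoke __kuser_cmpxchg at its fixed address as
 * below, following the ABI in Documentation/arm/kernel_user_helpers.txt
 * (r0 = oldval, r1 = newval, r2 = ptr; returns 0 with C set on success,
 * non-zero with C clear on failure):
 *
 *	atomic_inc:				@ hypothetical routine
 *		stmfd	sp!, {r4, lr}
 *		mov	r4, r0			@ r4 = &counter
 *	1:	ldr	r0, [r4]		@ oldval
 *		add	r1, r0, #1		@ newval
 *		mov	r2, r4			@ ptr
 *		ldr	r3, =0xffff0fc0		@ __kuser_cmpxchg entry
 *		blx	r3			@ (ARMv5+; use mov lr, pc /
 *						@  mov pc, r3 on ARMv4T)
 *		bcc	1b			@ C clear: raced, retry
 *		ldmfd	sp!, {r4, pc}
 */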
 THUMB(	.arm	)

	.macro	usr_ret, reg
#ifdef CONFIG_ARM_THUMB
	bx	\reg
#else
	mov	pc, \reg
#endif
	.endm

	.macro	kuser_pad, sym, size
	.if	(. - \sym) & 3
	.rept	4 - (. - \sym) & 3
	.byte	0
	.endr
	.endif
	.rept	(\size - (. - \sym)) / 4
	.word	0xe7fddef1
	.endr
	.endm
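
	@ The 0xe7fddef1 pad word is in the architecturally undefined
	@ instruction space, so a stray jump into the padding traps
	@ rather than falling through into the next helper.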

	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:

/*
 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
 */

__kuser_cmpxchg64:				@ 0xffff0f60

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	ldr	r7, 1f			@ it's 20 bits
	swi	__ARM_NR_cmpxchg64
	ldmfd	sp!, {r7, pc}
1:	.word	__ARM_NR_cmpxchg64

#elif defined(CONFIG_CPU_32v6K)

	stmfd	sp!, {r4, r5, r6, r7}
	ldrd	r4, r5, [r0]			@ load old val
	ldrd	r6, r7, [r1]			@ load new val
	smp_dmb	arm
1:	ldrexd	r0, r1, [r2]			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
	strexdeq r3, r6, r7, [r2]		@ store newval if eq
	teqeq	r3, #1				@ success?
	beq	1b				@ if no then retry
	smp_dmb	arm
	rsbs	r0, r3, #0			@ set returned val and C flag
	ldmfd	sp!, {r4, r5, r6, r7}
	usr_ret	lr

#elif !defined(CONFIG_SMP)

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg64
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle of
	 * the critical sequence.  The same strategy as for cmpxchg is used.
	 */
	stmfd	sp!, {r4, r5, r6, lr}
	ldmia	r0, {r4, r5}			@ load old val
	ldmia	r1, {r6, lr}			@ load new val
1:	ldmia	r2, {r0, r1}			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
2:	stmeqia	r2, {r6, lr}			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	ldmfd	sp!, {r4, r5, r6, pc}

	.text
kuser_cmpxchg64_fixup:
	@ Called from the kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
#if __LINUX_ARM_ARCH__ < 6
	bcc	kuser_cmpxchg32_fixup
#endif
	mov	pc, lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else
#error "incoherent kernel configuration"
#endif

	kuser_pad __kuser_cmpxchg64, 64

__kuser_memory_barrier:				@ 0xffff0fa0
	smp_dmb	arm
	usr_ret	lr

	kuser_pad __kuser_memory_barrier, 32

__kuser_cmpxchg:				@ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	ldr	r7, 1f			@ it's 20 bits
	swi	__ARM_NR_cmpxchg
	ldmfd	sp!, {r7, pc}
1:	.word	__ARM_NR_cmpxchg

#elif __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg_fixup).
	 */
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	usr_ret	lr

	.text
kuser_cmpxchg32_fixup:
	@ Called from kuser_cmpxchg64_fixup above.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
	mov	pc, lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else

	smp_dmb	arm
1:	ldrex	r3, [r2]
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
	teqeq	r3, #1
	beq	1b
	rsbs	r0, r3, #0
	/* beware -- each __kuser slot must be 8 instructions max */
	ALT_SMP(b	__kuser_memory_barrier)
	ALT_UP(usr_ret	lr)

#endif

	kuser_pad __kuser_cmpxchg, 32

__kuser_get_tls:				@ 0xffff0fe0
	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
	usr_ret	lr
	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
	kuser_pad __kuser_get_tls, 16
	.rep	3
	.word	0			@ 0xffff0ff0 software TLS value, then
	.endr				@ pad up to __kuser_helper_version

__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:

 THUMB(	.thumb	)

/*
 * Vector stubs.
 *
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not
 * exceed 0x300 bytes.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
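
/*
 * Reading aid: each stub saves r0, the parent lr and the parent spsr in
 * the per-mode scratch area, rewrites spsr_svc so that "movs pc, lr"
 * re-enters the kernel in SVC mode (IRQs still disabled, instruction
 * set forced to the kernel's), and indexes the branch table following
 * the macro body by the parent mode nibble to select the matching
 * __*_usr, __*_svc or *_invalid handler.
 */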
	.macro	vector_stub, name, mode, correction=0
	.align	5

vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	@ (parent CPSR)
	@
	stmia	sp, {r0, lr}		@ save r0, lr
	mrs	lr, spsr
	str	lr, [sp, #8]		@ save spsr

	@
	@ Prepare for SVC32 mode.  IRQs remain disabled.
	@
	mrs	r0, cpsr
	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
	msr	spsr_cxsf, r0

	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f
 THUMB(	adr	r0, 1f			)
 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
	mov	r0, sp
 ARM(	ldr	lr, [pc, lr, lsl #2]	)
	movs	pc, lr			@ branch to handler in SVC mode
ENDPROC(vector_\name)

	.align	2
	@ handler addresses follow this label
1:
	.endm

	.globl	__stubs_start
__stubs_start:
/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0 (USR_26 / USR_32)
	.long	__pabt_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3 (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0 (USR_26 / USR_32)
	.long	__und_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3 (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f

	.align	5

/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
vector_fiq:
	subs	pc, lr, #4

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
	b	vector_addrexcptn

/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
	.align	5

.LCvswi:
	.word	vector_swi

	.globl	__stubs_end
__stubs_end:

	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start
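
/*
 * Reading aid: the vectors below are copied to 0xffff0000 and the stubs
 * to 0xffff0200.  A branch like "W(b) vector_und + stubs_offset" is
 * assembled PC-relative at the vectors' link address; adding
 * stubs_offset (= __vectors_start + 0x200 - __stubs_start) makes the
 * encoded displacement come out right once both blocks are executing
 * at their copied addresses.
 */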

	.globl	__vectors_start
__vectors_start:
 ARM(	swi	SYS_ERROR0	)
 THUMB(	svc	#0		)
 THUMB(	nop			)
	W(b)	vector_und + stubs_offset
	W(ldr)	pc, .LCvswi + stubs_offset
	W(b)	vector_pabt + stubs_offset
	W(b)	vector_dabt + stubs_offset
	W(b)	vector_addrexcptn + stubs_offset
	W(b)	vector_irq + stubs_offset
	W(b)	vector_fiq + stubs_offset

	.globl	__vectors_end
__vectors_end:

	.data

	.globl	cr_alignment
	.globl	cr_no_alignment
cr_alignment:
	.space	4
cr_no_alignment:
	.space	4

#ifdef CONFIG_MULTI_IRQ_HANDLER
	.globl	handle_arch_irq
handle_arch_irq:
	.space	4
#endif