entry-armv.S 28.1 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5
/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
6
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
L
Linus Torvalds 已提交
7 8 9 10 11 12 13
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
14 15
 *  Note:  there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 *  that causes it to save wrong values...  Be aware!
L
Linus Torvalds 已提交
16 17
 */

18
#include <asm/memory.h>
19 20
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
L
Linus Torvalds 已提交
21
#include <asm/vfpmacros.h>
22
#include <mach/entry-macro.S>
23
#include <asm/thread_notify.h>
24
#include <asm/unwind.h>
25
#include <asm/unistd.h>
26
#include <asm/tls.h>
L
Linus Torvalds 已提交
27 28

#include "entry-header.S"
29
#include <asm/entry-macro-multi.S>
L
Linus Torvalds 已提交
30

31
/*
32
 * Interrupt handling.
33 34
 */
	.macro	irq_handler
35
#ifdef CONFIG_MULTI_IRQ_HANDLER
36
	ldr	r1, =handle_arch_irq
37
	mov	r0, sp
38
	ldr	r1, [r1]
39
	adr	lr, BSYM(9997f)
40 41
	teq	r1, #0
	movne	pc, r1
42
#endif
43
	arch_irq_handler_default
44
9997:
45 46
	.endm

47
	.macro	pabt_helper
48
	@ PABORT handler takes fault address in r4
49
#ifdef MULTI_PABORT
50
	ldr	ip, .LCprocfns
51
	mov	lr, pc
52
	ldr	pc, [ip, #PROCESSOR_PABT_FUNC]
53 54 55 56 57 58
#else
	bl	CPU_PABORT_HANDLER
#endif
	.endm

	.macro	dabt_helper
59 60
	mov	r2, r4
	mov	r3, r5
61 62 63 64 65 66 67 68 69 70 71

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context cpsr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
#ifdef MULTI_DABORT
72
	ldr	ip, .LCprocfns
73
	mov	lr, pc
74
	ldr	pc, [ip, #PROCESSOR_DABT_FUNC]
75 76 77 78 79
#else
	bl	CPU_DABORT_HANDLER
#endif
	.endm

80 81 82 83 84 85
#ifdef CONFIG_KPROBES
	.section	.kprobes.text,"ax",%progbits
#else
	.text
#endif

L
Linus Torvalds 已提交
86 87 88
/*
 * Invalid mode handlers
 */
R
Russell King 已提交
89 90
	.macro	inv_entry, reason
	sub	sp, sp, #S_FRAME_SIZE
91 92 93 94
 ARM(	stmib	sp, {r1 - lr}		)
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
 THUMB(	str	lr, [sp, #S_LR]		)
L
Linus Torvalds 已提交
95 96 97 98
	mov	r1, #\reason
	.endm

__pabt_invalid:
R
Russell King 已提交
99 100
	inv_entry BAD_PREFETCH
	b	common_invalid
101
ENDPROC(__pabt_invalid)
L
Linus Torvalds 已提交
102 103

__dabt_invalid:
R
Russell King 已提交
104 105
	inv_entry BAD_DATA
	b	common_invalid
106
ENDPROC(__dabt_invalid)
L
Linus Torvalds 已提交
107 108

__irq_invalid:
R
Russell King 已提交
109 110
	inv_entry BAD_IRQ
	b	common_invalid
111
ENDPROC(__irq_invalid)
L
Linus Torvalds 已提交
112 113

__und_invalid:
R
Russell King 已提交
114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131
	inv_entry BAD_UNDEFINSTR

	@
	@ XXX fall through to common_invalid
	@

@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
common_invalid:
	zero_fp

	ldmia	r0, {r4 - r6}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"
L
Linus Torvalds 已提交
132 133 134

	mov	r0, sp
	b	bad_mode
135
ENDPROC(__und_invalid)
L
Linus Torvalds 已提交
136 137 138 139

/*
 * SVC mode handlers
 */
140 141 142 143 144 145 146

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif

147
	.macro	svc_entry, stack_hole=0
148 149
 UNWIND(.fnstart		)
 UNWIND(.save {r0 - pc}		)
150 151 152 153 154 155 156
	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 SPFIX(	mov	r0, sp		)
 SPFIX(	tst	r0, #4		)	@ test original stack alignment
 SPFIX(	ldr	r0, [sp]	)	@ restored
#else
157
 SPFIX(	tst	sp, #4		)
158 159 160
#endif
 SPFIX(	subeq	sp, sp, #4	)
	stmia	sp, {r1 - r12}
R
Russell King 已提交
161

162 163 164 165 166 167
	ldmia	r0, {r3 - r5}
	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""      ""       ""
	add	r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
 SPFIX(	addeq	r2, r2, #4	)
	str	r3, [sp, #-4]!		@ save the "real" r0 copied
R
Russell King 已提交
168 169
					@ from the exception stack

170
	mov	r3, lr
L
Linus Torvalds 已提交
171 172 173 174

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
175 176 177 178 179
	@  r2 - sp_svc
	@  r3 - lr_svc
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
L
Linus Torvalds 已提交
180
	@
181
	stmia	r7, {r2 - r6}
L
Linus Torvalds 已提交
182

183 184 185
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
186
	.endm
L
Linus Torvalds 已提交
187

188 189 190
	.align	5
__dabt_svc:
	svc_entry
191
	dabt_helper
L
Linus Torvalds 已提交
192 193

	@
194
	@ call main handler
L
Linus Torvalds 已提交
195 196 197 198 199 200 201
	@
	mov	r2, sp
	bl	do_DataAbort

	@
	@ IRQs off again before pulling preserved data off the stack
	@
202
	disable_irq_notrace
L
Linus Torvalds 已提交
203 204 205 206

	@
	@ restore SPSR and restart the instruction
	@
207
	ldr	r5, [sp, #S_PSR]
208 209 210 211 212 213
#ifdef CONFIG_TRACE_IRQFLAGS
	tst	r5, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	r5, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
214
	svc_exit r5				@ return from exception
215
 UNWIND(.fnend		)
216
ENDPROC(__dabt_svc)
L
Linus Torvalds 已提交
217 218 219

	.align	5
__irq_svc:
R
Russell King 已提交
220
	svc_entry
221
	irq_handler
222

L
Linus Torvalds 已提交
223
#ifdef CONFIG_PREEMPT
224 225
	get_thread_info tsk
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
226
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
R
Russell King 已提交
227 228
	teq	r8, #0				@ if preempt count != 0
	movne	r0, #0				@ force flags to 0
L
Linus Torvalds 已提交
229 230 231
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
#endif
232
	ldr	r5, [sp, #S_PSR]
R
Russell King 已提交
233
#ifdef CONFIG_TRACE_IRQFLAGS
234 235 236
	@ The parent context IRQs must have been enabled to get here in
	@ the first place, so there's no point checking the PSR I bit.
	bl	trace_hardirqs_on
R
Russell King 已提交
237
#endif
238
	svc_exit r5				@ return from exception
239
 UNWIND(.fnend		)
240
ENDPROC(__irq_svc)
L
Linus Torvalds 已提交
241 242 243 244 245

	.ltorg

#ifdef CONFIG_PREEMPT
svc_preempt:
R
Russell King 已提交
246
	mov	r8, lr
L
Linus Torvalds 已提交
247
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
248
	ldr	r0, [tsk, #TI_FLAGS]		@ get new tasks TI_FLAGS
L
Linus Torvalds 已提交
249
	tst	r0, #_TIF_NEED_RESCHED
R
Russell King 已提交
250
	moveq	pc, r8				@ go again
L
Linus Torvalds 已提交
251 252 253 254 255
	b	1b
#endif

	.align	5
__und_svc:
256 257 258 259 260 261
#ifdef CONFIG_KPROBES
	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
	@ it obviously needs free stack space which then will belong to
	@ the saved context.
	svc_entry 64
#else
R
Russell King 已提交
262
	svc_entry
263
#endif
L
Linus Torvalds 已提交
264 265 266 267 268 269 270
	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	@  r0 - instruction
	@
271
#ifndef	CONFIG_THUMB2_KERNEL
272
	ldr	r0, [r4, #-4]
273
#else
274
	ldrh	r0, [r4, #-2]			@ Thumb instruction at LR - 2
275 276
	and	r9, r0, #0xf800
	cmp	r9, #0xe800			@ 32-bit instruction if xx >= 0
277
	ldrhhs	r9, [r4]			@ bottom 16 bits
278 279
	orrhs	r0, r9, r0, lsl #16
#endif
280
	adr	r9, BSYM(1f)
281
	mov	r2, r4
L
Linus Torvalds 已提交
282 283 284 285 286 287 288 289
	bl	call_fpe

	mov	r0, sp				@ struct pt_regs *regs
	bl	do_undefinstr

	@
	@ IRQs off again before pulling preserved data off the stack
	@
290
1:	disable_irq_notrace
L
Linus Torvalds 已提交
291 292 293 294

	@
	@ restore SPSR and restart the instruction
	@
295
	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
296 297 298 299 300 301
#ifdef CONFIG_TRACE_IRQFLAGS
	tst	r5, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	r5, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
302
	svc_exit r5				@ return from exception
303
 UNWIND(.fnend		)
304
ENDPROC(__und_svc)
L
Linus Torvalds 已提交
305 306 307

	.align	5
__pabt_svc:
R
Russell King 已提交
308
	svc_entry
309
	pabt_helper
310
	mov	r2, sp				@ regs
L
Linus Torvalds 已提交
311 312 313 314 315
	bl	do_PrefetchAbort		@ call abort handler

	@
	@ IRQs off again before pulling preserved data off the stack
	@
316
	disable_irq_notrace
L
Linus Torvalds 已提交
317 318 319 320

	@
	@ restore SPSR and restart the instruction
	@
321
	ldr	r5, [sp, #S_PSR]
322 323 324 325 326 327
#ifdef CONFIG_TRACE_IRQFLAGS
	tst	r5, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	r5, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
328
	svc_exit r5				@ return from exception
329
 UNWIND(.fnend		)
330
ENDPROC(__pabt_svc)
L
Linus Torvalds 已提交
331 332

	.align	5
333 334
.LCcralign:
	.word	cr_alignment
P
Paul Brook 已提交
335
#ifdef MULTI_DABORT
L
Linus Torvalds 已提交
336 337 338 339 340 341 342 343
.LCprocfns:
	.word	processor
#endif
.LCfp:
	.word	fp_enter

/*
 * User mode handlers
344 345
 *
 * EABI note: sp_svc is always 64-bit aligned here, so should S_FRAME_SIZE
L
Linus Torvalds 已提交
346
 */
347 348 349 350 351

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

R
Russell King 已提交
352
	.macro	usr_entry
353 354
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)	@ don't unwind the user space
R
Russell King 已提交
355
	sub	sp, sp, #S_FRAME_SIZE
356 357
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)
R
Russell King 已提交
358

359
	ldmia	r0, {r3 - r5}
R
Russell King 已提交
360
	add	r0, sp, #S_PC		@ here for interlock avoidance
361
	mov	r6, #-1			@  ""  ""     ""        ""
R
Russell King 已提交
362

363
	str	r3, [sp]		@ save the "real" r0 copied
R
Russell King 已提交
364
					@ from the exception stack
L
Linus Torvalds 已提交
365 366 367 368

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
369 370 371
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
L
Linus Torvalds 已提交
372 373 374
	@
	@ Also, separately save sp_usr and lr_usr
	@
375
	stmia	r0, {r4 - r6}
376 377
 ARM(	stmdb	r0, {sp, lr}^			)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)
L
Linus Torvalds 已提交
378 379 380 381

	@
	@ Enable the alignment trap while in kernel mode
	@
382
	alignment_trap r0
L
Linus Torvalds 已提交
383 384 385 386 387

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp
388 389 390 391

#ifdef CONFIG_IRQSOFF_TRACER
	bl	trace_hardirqs_off
#endif
L
Linus Torvalds 已提交
392 393
	.endm

394 395 396 397 398 399 400 401 402
	.macro	kuser_cmpxchg_check
#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
	@ Make sure our user space atomic helper is restarted
	@ if it was interrupted in a critical region.  Here we
	@ perform a quick test inline since it should be false
	@ 99.9999% of the time.  The rest is done out of line.
403
	cmp	r4, #TASK_SIZE
404 405 406 407 408
	blhs	kuser_cmpxchg_fixup
#endif
#endif
	.endm

L
Linus Torvalds 已提交
409 410
	.align	5
__dabt_usr:
R
Russell King 已提交
411
	usr_entry
412
	kuser_cmpxchg_check
413
	dabt_helper
L
Linus Torvalds 已提交
414 415

	mov	r2, sp
416
	adr	lr, BSYM(ret_from_exception)
L
Linus Torvalds 已提交
417
	b	do_DataAbort
418
 UNWIND(.fnend		)
419
ENDPROC(__dabt_usr)
L
Linus Torvalds 已提交
420 421 422

	.align	5
__irq_usr:
R
Russell King 已提交
423
	usr_entry
424
	kuser_cmpxchg_check
425
	irq_handler
426
	get_thread_info tsk
L
Linus Torvalds 已提交
427
	mov	why, #0
428
	b	ret_to_user_from_irq
429
 UNWIND(.fnend		)
430
ENDPROC(__irq_usr)
L
Linus Torvalds 已提交
431 432 433 434 435

	.ltorg

	.align	5
__und_usr:
R
Russell King 已提交
436
	usr_entry
437

438 439
	mov	r2, r4
	mov	r3, r5
L
Linus Torvalds 已提交
440 441 442 443 444 445 446 447

	@
	@ fall through to the emulation code, which returns using r9 if
	@ it has emulated the instruction, or the more conventional lr
	@ if we are to treat this as a real undefined instruction
	@
	@  r0 - instruction
	@
448 449
	adr	r9, BSYM(ret_from_exception)
	adr	lr, BSYM(__und_usr_unknown)
450
	tst	r3, #PSR_T_BIT			@ Thumb mode?
451
	itet	eq				@ explicit IT needed for the 1f label
452 453 454
	subeq	r4, r2, #4			@ ARM instr at LR - 4
	subne	r4, r2, #2			@ Thumb instr at LR - 2
1:	ldreqt	r0, [r4]
455 456 457
#ifdef CONFIG_CPU_ENDIAN_BE8
	reveq	r0, r0				@ little endian instruction
#endif
458 459 460
	beq	call_fpe
	@ Thumb instruction
#if __LINUX_ARM_ARCH__ >= 7
461 462 463 464
2:
 ARM(	ldrht	r5, [r4], #2	)
 THUMB(	ldrht	r5, [r4]	)
 THUMB(	add	r4, r4, #2	)
465 466 467 468 469 470 471 472 473
	and	r0, r5, #0xf800			@ mask bits 111x x... .... ....
	cmp	r0, #0xe800			@ 32bit instruction if xx != 0
	blo	__und_usr_unknown
3:	ldrht	r0, [r4]
	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
	orr	r0, r0, r5, lsl #16
#else
	b	__und_usr_unknown
#endif
474
 UNWIND(.fnend		)
475
ENDPROC(__und_usr)
476

L
Linus Torvalds 已提交
477 478 479 480 481 482 483
	@
	@ fallthrough to call_fpe
	@

/*
 * The out of line fixup for the ldrt above.
 */
484
	.pushsection .fixup, "ax"
485
4:	mov	pc, r9
486 487
	.popsection
	.pushsection __ex_table,"a"
488 489 490 491 492
	.long	1b, 4b
#if __LINUX_ARM_ARCH__ >= 7
	.long	2b, 4b
	.long	3b, 4b
#endif
493
	.popsection
L
Linus Torvalds 已提交
494 495 496 497 498 499 500 501 502 503 504

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
505 506 507 508 509 510 511
 * NEON is a special case that has to be handled here. Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them. Plus, there's
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
L
Linus Torvalds 已提交
512 513 514
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode.
 *  r2  = PC+4
515
 *  r9  = normal "successful" return address
L
Linus Torvalds 已提交
516
 *  r10 = this threads thread_info structure.
517
 *  lr  = unrecognised instruction return address
L
Linus Torvalds 已提交
518
 */
519 520 521 522 523 524 525
	@
	@ Fall-through from Thumb-2 __und_usr
	@
#ifdef CONFIG_NEON
	adr	r6, .LCneon_thumb_opcodes
	b	2f
#endif
L
Linus Torvalds 已提交
526
call_fpe:
527
#ifdef CONFIG_NEON
528
	adr	r6, .LCneon_arm_opcodes
529 530 531 532 533 534 535 536 537 538 539 540 541 542 543
2:
	ldr	r7, [r6], #4			@ mask value
	cmp	r7, #0				@ end mask?
	beq	1f
	and	r8, r0, r7
	ldr	r7, [r6], #4			@ opcode bits matching in mask
	cmp	r8, r7				@ NEON instruction?
	bne	2b
	get_thread_info r10
	mov	r7, #1
	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
	b	do_vfp				@ let VFP handler handle this
1:
#endif
L
Linus Torvalds 已提交
544
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
545
	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
L
Linus Torvalds 已提交
546 547 548 549 550 551 552
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
	and	r8, r0, #0x0f000000		@ mask out op-code bits
	teqne	r8, #0x0f000000			@ SWI (ARM6/7 bug)?
#endif
	moveq	pc, lr
	get_thread_info r10			@ get current thread
	and	r8, r0, #0x00000f00		@ mask out CP number
553
 THUMB(	lsr	r8, r8, #8		)
L
Linus Torvalds 已提交
554 555
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
556 557
 ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
 THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
L
Linus Torvalds 已提交
558 559 560 561 562 563 564
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
565 566 567 568 569
 ARM(	add	pc, pc, r8, lsr #6	)
 THUMB(	lsl	r8, r8, #2		)
 THUMB(	add	pc, r8			)
	nop

570
	movw_pc	lr				@ CP#0
571 572
	W(b)	do_fpe				@ CP#1 (FPE)
	W(b)	do_fpe				@ CP#2 (FPE)
573
	movw_pc	lr				@ CP#3
574 575 576 577 578
#ifdef CONFIG_CRUNCH
	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
#else
579 580 581
	movw_pc	lr				@ CP#4
	movw_pc	lr				@ CP#5
	movw_pc	lr				@ CP#6
582
#endif
583 584 585
	movw_pc	lr				@ CP#7
	movw_pc	lr				@ CP#8
	movw_pc	lr				@ CP#9
L
Linus Torvalds 已提交
586
#ifdef CONFIG_VFP
587 588
	W(b)	do_vfp				@ CP#10 (VFP)
	W(b)	do_vfp				@ CP#11 (VFP)
L
Linus Torvalds 已提交
589
#else
590 591
	movw_pc	lr				@ CP#10 (VFP)
	movw_pc	lr				@ CP#11 (VFP)
L
Linus Torvalds 已提交
592
#endif
593 594 595 596
	movw_pc	lr				@ CP#12
	movw_pc	lr				@ CP#13
	movw_pc	lr				@ CP#14 (Debug)
	movw_pc	lr				@ CP#15 (Control)
L
Linus Torvalds 已提交
597

598 599 600
#ifdef CONFIG_NEON
	.align	6

601
.LCneon_arm_opcodes:
602 603 604 605 606 607
	.word	0xfe000000			@ mask
	.word	0xf2000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf4000000			@ opcode

608 609 610 611 612 613 614 615 616 617
	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode

.LCneon_thumb_opcodes:
	.word	0xef000000			@ mask
	.word	0xef000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf9000000			@ opcode

618 619 620 621
	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode
#endif

L
Linus Torvalds 已提交
622
do_fpe:
623
	enable_irq
L
Linus Torvalds 已提交
624 625 626 627 628 629 630 631 632 633 634 635 636
	ldr	r4, .LCfp
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

637
	.pushsection .data
L
Linus Torvalds 已提交
638
ENTRY(fp_enter)
639
	.word	no_fp
640
	.popsection
L
Linus Torvalds 已提交
641

642 643 644
ENTRY(no_fp)
	mov	pc, lr
ENDPROC(no_fp)
645 646

__und_usr_unknown:
647
	enable_irq
L
Linus Torvalds 已提交
648
	mov	r0, sp
649
	adr	lr, BSYM(ret_from_exception)
L
Linus Torvalds 已提交
650
	b	do_undefinstr
651
ENDPROC(__und_usr_unknown)
L
Linus Torvalds 已提交
652 653 654

	.align	5
__pabt_usr:
R
Russell King 已提交
655
	usr_entry
656
	pabt_helper
657
	mov	r2, sp				@ regs
L
Linus Torvalds 已提交
658
	bl	do_PrefetchAbort		@ call abort handler
659
 UNWIND(.fnend		)
L
Linus Torvalds 已提交
660 661 662 663 664
	/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
665 666
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
L
Linus Torvalds 已提交
667 668 669
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
670
 UNWIND(.fnend		)
671 672
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)
L
Linus Torvalds 已提交
673 674 675 676 677 678 679

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
680 681
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
L
Linus Torvalds 已提交
682 683
	add	ip, r1, #TI_CPU_SAVE
	ldr	r3, [r2, #TI_TP_VALUE]
684 685 686 687
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
688
#ifdef CONFIG_CPU_USE_DOMAINS
689
	ldr	r6, [r2, #TI_CPU_DOMAIN]
690
#endif
691
	set_tls	r3, r4, r5
692 693 694 695 696
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	ldr	r7, [r2, #TI_TASK]
	ldr	r8, =__stack_chk_guard
	ldr	r7, [r7, #TSK_STACK_CANARY]
#endif
697
#ifdef CONFIG_CPU_USE_DOMAINS
L
Linus Torvalds 已提交
698 699
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
700 701 702 703 704
	mov	r5, r0
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
705 706 707
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	str	r7, [r8]
#endif
708
 THUMB(	mov	ip, r4			   )
709
	mov	r0, r5
710 711 712 713
 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 THUMB(	ldr	sp, [ip], #4		   )
 THUMB(	ldr	pc, [ip]		   )
714
 UNWIND(.fnend		)
715
ENDPROC(__switch_to)
L
Linus Torvalds 已提交
716 717

	__INIT
718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747

/*
 * User helpers.
 *
 * These are segment of kernel provided user code reachable from user space
 * at a fixed address in kernel memory.  This is used to provide user space
 * with some operations which require kernel help because of unimplemented
 * native feature and/or instructions in many ARM CPUs. The idea is for
 * this code to be executed directly in user mode for best efficiency but
 * which is too intimate with the kernel counter part to be left to user
 * libraries.  In fact this code might even differ from one CPU to another
 * depending on the available  instruction set and restrictions like on
 * SMP systems.  In other words, the kernel reserves the right to change
 * this code as needed without warning. Only the entry points and their
 * results are guaranteed to be stable.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * User space is expected to implement those things inline when optimizing
 * for a processor that has the necessary native support, but only if such
 * resulting binaries are already to be incompatible with earlier ARM
 * processors due to the use of unsupported instructions other than what
 * is provided here.  In other words don't make binaries unable to run on
 * earlier processors just for the sake of not using these kernel helpers
 * if your compiled code is not going to use the new instructions for other
 * purpose.
 */
748
 THUMB(	.arm	)
749

750 751 752 753 754 755 756 757
	.macro	usr_ret, reg
#ifdef CONFIG_ARM_THUMB
	bx	\reg
#else
	mov	pc, \reg
#endif
	.endm

758 759 760 761
	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:

762 763 764 765 766 767 768 769 770 771 772 773 774 775 776
/*
 * Reference prototype:
 *
 *	void __kernel_memory_barrier(void)
 *
 * Input:
 *
 *	lr = return address
 *
 * Output:
 *
 *	none
 *
 * Clobbered:
 *
777
 *	none
778 779 780 781 782 783 784 785 786 787 788 789 790
 *
 * Definition and user space usage example:
 *
 *	typedef void (__kernel_dmb_t)(void);
 *	#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
 *
 * Apply any needed memory barrier to preserve consistency with data modified
 * manually and __kuser_cmpxchg usage.
 *
 * This could be used as follows:
 *
 * #define __kernel_dmb() \
 *         asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
791
 *	        : : : "r0", "lr","cc" )
792 793 794
 */

__kuser_memory_barrier:				@ 0xffff0fa0
795
	smp_dmb	arm
796
	usr_ret	lr
797 798 799

	.align	5

800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830
/*
 * Reference prototype:
 *
 *	int __kernel_cmpxchg(int oldval, int newval, int *ptr)
 *
 * Input:
 *
 *	r0 = oldval
 *	r1 = newval
 *	r2 = ptr
 *	lr = return address
 *
 * Output:
 *
 *	r0 = returned value (zero or non-zero)
 *	C flag = set if r0 == 0, clear if r0 != 0
 *
 * Clobbered:
 *
 *	r3, ip, flags
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
 *	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
 * Return zero if *ptr was changed or non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed to allow for assembly
 * optimization in the calling code.
 *
831 832 833 834
 * Notes:
 *
 *    - This routine already includes memory barriers as needed.
 *
835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855
 * For example, a user space atomic_add implementation could look like this:
 *
 * #define atomic_add(ptr, val) \
 *	({ register unsigned int *__ptr asm("r2") = (ptr); \
 *	   register unsigned int __result asm("r1"); \
 *	   asm volatile ( \
 *	       "1: @ atomic_add\n\t" \
 *	       "ldr	r0, [r2]\n\t" \
 *	       "mov	r3, #0xffff0fff\n\t" \
 *	       "add	lr, pc, #4\n\t" \
 *	       "add	r1, r0, %2\n\t" \
 *	       "add	pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
 *	       "bcc	1b" \
 *	       : "=&r" (__result) \
 *	       : "r" (__ptr), "rIL" (val) \
 *	       : "r0","r3","ip","lr","cc","memory" ); \
 *	   __result; })
 */

__kuser_cmpxchg:				@ 0xffff0fc0

856
#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
857

858 859 860 861 862
	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
863
	stmfd	sp!, {r7, lr}
864
	ldr	r7, 1f			@ it's 20 bits
865
	swi	__ARM_NR_cmpxchg
866
	ldmfd	sp!, {r7, pc}
867
1:	.word	__ARM_NR_cmpxchg
868 869

#elif __LINUX_ARM_ARCH__ < 6
870

871 872
#ifdef CONFIG_MMU

873
	/*
874 875 876 877 878 879 880
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg_fixup).
881
	 */
882 883 884 885 886 887 888 889 890
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	usr_ret	lr

	.text
kuser_cmpxchg_fixup:
	@ Called from kuser_cmpxchg_check macro.
891
	@ r4 = address of interrupted insn (must be preserved).
892 893
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
894
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
895 896
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
897
	subs	r8, r4, r7
898 899 900 901 902
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
	mov	pc, lr
	.previous

903 904 905 906
#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
907
	usr_ret	lr
908
#endif
909 910 911

#else

912
	smp_dmb	arm
913
1:	ldrex	r3, [r2]
914 915
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
916 917
	teqeq	r3, #1
	beq	1b
918
	rsbs	r0, r3, #0
919
	/* beware -- each __kuser slot must be 8 instructions max */
920 921
	ALT_SMP(b	__kuser_memory_barrier)
	ALT_UP(usr_ret	lr)
922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941

#endif

	.align	5

/*
 * Reference prototype:
 *
 *	int __kernel_get_tls(void)
 *
 * Input:
 *
 *	lr = return address
 *
 * Output:
 *
 *	r0 = TLS value
 *
 * Clobbered:
 *
942
 *	none
943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_get_tls_t)(void);
 *	#define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
 *
 * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
 *
 * This could be used as follows:
 *
 * #define __kernel_get_tls() \
 *	({ register unsigned int __val asm("r0"); \
 *         asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
 *	        : "=r" (__val) : : "lr","cc" ); \
 *	   __val; })
 */

__kuser_get_tls:				@ 0xffff0fe0
961
	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
962
	usr_ret	lr
963 964 965 966
	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
	.rep	4
	.word	0			@ 0xffff0ff0 software TLS value, then
	.endr				@ pad up to __kuser_helper_version
967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986

/*
 * Reference declaration:
 *
 *	extern unsigned int __kernel_helper_version;
 *
 * Definition and user space usage example:
 *
 *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
 *
 * User space may read this to determine the curent number of helpers
 * available.
 */

__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:

987
 THUMB(	.thumb	)
988

L
Linus Torvalds 已提交
989 990 991
/*
 * Vector stubs.
 *
992 993 994
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not
 * exceed 0x300 bytes.
L
Linus Torvalds 已提交
995 996 997
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
R
Russell King 已提交
998 999 1000
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
L
Linus Torvalds 已提交
1001
 */
1002
	.macro	vector_stub, name, mode, correction=0
L
Linus Torvalds 已提交
1003 1004 1005 1006 1007 1008
	.align	5

vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif
R
Russell King 已提交
1009 1010 1011 1012 1013 1014

	@
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	@ (parent CPSR)
	@
	stmia	sp, {r0, lr}		@ save r0, lr
L
Linus Torvalds 已提交
1015
	mrs	lr, spsr
R
Russell King 已提交
1016 1017
	str	lr, [sp, #8]		@ save spsr

L
Linus Torvalds 已提交
1018
	@
R
Russell King 已提交
1019
	@ Prepare for SVC32 mode.  IRQs remain disabled.
L
Linus Torvalds 已提交
1020
	@
R
Russell King 已提交
1021
	mrs	r0, cpsr
1022
	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
R
Russell King 已提交
1023
	msr	spsr_cxsf, r0
L
Linus Torvalds 已提交
1024

R
Russell King 已提交
1025 1026 1027 1028
	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f
1029 1030
 THUMB(	adr	r0, 1f			)
 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
1031
	mov	r0, sp
1032
 ARM(	ldr	lr, [pc, lr, lsl #2]	)
R
Russell King 已提交
1033
	movs	pc, lr			@ branch to handler in SVC mode
1034
ENDPROC(vector_\name)
1035 1036 1037 1038

	.align	2
	@ handler addresses follow this label
1:
L
Linus Torvalds 已提交
1039 1040
	.endm

1041
	.globl	__stubs_start
L
Linus Torvalds 已提交
1042 1043 1044 1045
__stubs_start:
/*
 * Interrupt dispatcher
 */
1046
	vector_stub	irq, IRQ_MODE, 4
L
Linus Torvalds 已提交
1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8	@ lr correction for data abort is 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4	@ lr correction for prefetch abort is 4

	.long	__pabt_usr			@  0 (USR_26 / USR_32)
	.long	__pabt_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3 (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE		@ no lr correction: lr already points past the insn

	.long	__und_usr			@  0 (USR_26 / USR_32)
	.long	__und_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3 (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f

	.align	5

/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
vector_fiq:
	disable_fiq
	subs	pc, lr, #4		@ return to interrupted code, restoring CPSR from SPSR

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
	b	vector_addrexcptn	@ deliberate hang: this exception cannot occur in 32-bit mode

/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
	.align	5

	@ literal pool entry: absolute address of the SWI entry point,
	@ loaded directly by the swi slot of the vector page below
.LCvswi:
	.word	vector_swi

	.globl	__stubs_end
__stubs_end:

1172
	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start
L
Linus Torvalds 已提交
1173

1174 1175
	.globl	__vectors_start
__vectors_start:
1176 1177 1178 1179 1180 1181 1182 1183 1184 1185
 ARM(	swi	SYS_ERROR0	)
 THUMB(	svc	#0		)
 THUMB(	nop			)
	W(b)	vector_und + stubs_offset
	W(ldr)	pc, .LCvswi + stubs_offset
	W(b)	vector_pabt + stubs_offset
	W(b)	vector_dabt + stubs_offset
	W(b)	vector_addrexcptn + stubs_offset
	W(b)	vector_irq + stubs_offset
	W(b)	vector_fiq + stubs_offset
1186 1187 1188

	.globl	__vectors_end
__vectors_end:
L
Linus Torvalds 已提交
1189 1190 1191 1192 1193 1194 1195 1196 1197

	.data

	@ Saved copies of the CP15 control register: cr_alignment holds the
	@ value with the alignment-fault enable bit set, cr_no_alignment the
	@ value without it.
	.globl	cr_alignment
	.globl	cr_no_alignment
cr_alignment:
	.space	4
cr_no_alignment:
	.space	4

#ifdef CONFIG_MULTI_IRQ_HANDLER
	@ Runtime-installed pointer to the platform's IRQ handler, read by
	@ the irq_handler macro at the top of this file.
	.globl	handle_arch_irq
handle_arch_irq:
	.space	4
#endif