/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 *  Note:  there is a StrongARM bug in the STMIA rn, {regs}^ instruction that causes
 *  it to save wrong values...  Be aware!
 */
#include <linux/config.h>

#include <asm/memory.h>
#include <asm/glue.h>
#include <asm/vfpmacros.h>
#include <asm/arch/entry-macro.S>

#include "entry-header.S"

/*
 * Interrupt handling.  Preserves r7, r8, r9
 */
	.macro	irq_handler
1:	get_irqnr_and_base r0, r6, r5, lr
	movne	r1, sp
	@
	@ routine called with r0 = irq number, r1 = struct pt_regs *
	@
	adrne	lr, 1b
	bne	asm_do_IRQ

#ifdef CONFIG_SMP
	/*
	 * XXX
	 *
	 * this macro assumes that irqstat (r6) and base (r5) are
	 * preserved from get_irqnr_and_base above
	 */
	test_for_ipi r0, r6, r5, lr
	movne	r0, sp
	adrne	lr, 1b
	bne	do_IPI

#ifdef CONFIG_LOCAL_TIMERS
	test_for_ltirq r0, r6, r5, lr
	movne	r0, sp
	adrne	lr, 1b
	bne	do_local_timer
#endif
#endif

	.endm

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #S_FRAME_SIZE
	stmib	sp, {r1 - lr}
	mov	r1, #\reason
	.endm

__pabt_invalid:
	inv_entry BAD_PREFETCH
	b	common_invalid

__dabt_invalid:
	inv_entry BAD_DATA
	b	common_invalid

__irq_invalid:
	inv_entry BAD_IRQ
	b	common_invalid

__und_invalid:
	inv_entry BAD_UNDEFINSTR

	@
	@ XXX fall through to common_invalid
	@

@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
common_invalid:
	zero_fp

	ldmia	r0, {r4 - r6}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"

	mov	r0, sp
	and	r2, r6, #0x1f
	b	bad_mode

/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif

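/*
 * With CONFIG_AEABI on ARMv5 or later, the SVC-mode stack must stay
 * 64-bit aligned across the exception frame; the SPFIX() lines below
 * emit that alignment fixup only in those configurations.
 */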
	.macro	svc_entry
	sub	sp, sp, #S_FRAME_SIZE
 SPFIX(	tst	sp, #4		)
 SPFIX(	bicne	sp, sp, #4	)
	stmib	sp, {r1 - r12}

	ldmia	r0, {r1 - r3}
	add	r5, sp, #S_SP		@ here for interlock avoidance
	mov	r4, #-1			@  ""  ""      ""       ""
	add	r0, sp, #S_FRAME_SIZE   @  ""  ""      ""       ""
 SPFIX(	addne	r0, r0, #4	)
	str	r1, [sp]		@ save the "real" r0 copied
					@ from the exception stack

	mov	r1, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r0 - sp_svc
	@  r1 - lr_svc
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - spsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stmia	r5, {r0 - r4}
	.endm

	.align	5
__dabt_svc:
	svc_entry

	@
	@ get ready to re-enable interrupts if appropriate
	@
	mrs	r9, cpsr
	tst	r3, #PSR_I_BIT
	biceq	r9, r9, #PSR_I_BIT

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context cpsr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
#ifdef MULTI_ABORT
	ldr	r4, .LCprocfns
	mov	lr, pc
	ldr	pc, [r4]
#else
	bl	CPU_ABORT_HANDLER
#endif

	@
	@ set desired IRQ state, then call main handler
	@
	msr	cpsr_c, r9
	mov	r2, sp
	bl	do_DataAbort

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r0, [sp, #S_PSR]
	msr	spsr_cxsf, r0
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr

	.align	5
__irq_svc:
	svc_entry

#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	add	r7, r8, #1			@ increment it
	str	r7, [tsk, #TI_PREEMPT]
#endif

	irq_handler
#ifdef CONFIG_PREEMPT
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
preempt_return:
	ldr	r0, [tsk, #TI_PREEMPT]		@ read preempt value
	str	r8, [tsk, #TI_PREEMPT]		@ restore preempt count
	teq	r0, r7
	strne	r0, [r0, -r0]			@ bug()
#endif
	ldr	r0, [sp, #S_PSR]		@ irqs are already disabled
	msr	spsr_cxsf, r0
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr

	.ltorg

#ifdef CONFIG_PREEMPT
svc_preempt:
	teq	r8, #0				@ was preempt count = 0
	ldreq	r6, .LCirq_stat
	movne	pc, lr				@ no
	ldr	r0, [r6, #4]			@ local_irq_count
	ldr	r1, [r6, #8]			@ local_bh_count
	adds	r0, r0, r1
	movne	pc, lr
	mov	r7, #0				@ preempt_schedule_irq
	str	r7, [tsk, #TI_PREEMPT]		@ expects preempt_count == 0
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new task's TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	beq	preempt_return			@ go again
	b	1b
#endif

	.align	5
__und_svc:
	svc_entry

	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	@  r0 - instruction
	@
	ldr	r0, [r2, #-4]
	adr	r9, 1f
	bl	call_fpe

	mov	r0, sp				@ struct pt_regs *regs
	bl	do_undefinstr

	@
	@ IRQs off again before pulling preserved data off the stack
	@
1:	disable_irq

	@
	@ restore SPSR and restart the instruction
	@
	ldr	lr, [sp, #S_PSR]		@ Get SVC cpsr
	msr	spsr_cxsf, lr
	ldmia	sp, {r0 - pc}^			@ Restore SVC registers

	.align	5
__pabt_svc:
	svc_entry

	@
	@ re-enable interrupts if appropriate
	@
	mrs	r9, cpsr
	tst	r3, #PSR_I_BIT
	biceq	r9, r9, #PSR_I_BIT
	msr	cpsr_c, r9

	@
	@ set args, then call main handler
	@
	@  r0 - address of faulting instruction
	@  r1 - pointer to registers on stack
	@
	mov	r0, r2				@ address (pc)
	mov	r1, sp				@ regs
	bl	do_PrefetchAbort		@ call abort handler

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r0, [sp, #S_PSR]
	msr	spsr_cxsf, r0
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr

	.align	5
.LCcralign:
	.word	cr_alignment
#ifdef MULTI_ABORT
.LCprocfns:
	.word	processor
#endif
.LCfp:
	.word	fp_enter
#ifdef CONFIG_PREEMPT
.LCirq_stat:
	.word	irq_stat
#endif

/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, and S_FRAME_SIZE
 * should be too (see the build-time check below)
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

	.macro	usr_entry
	sub	sp, sp, #S_FRAME_SIZE
	stmib	sp, {r1 - r12}

	ldmia	r0, {r1 - r3}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r4, #-1			@  ""  ""     ""        ""

	str	r1, [sp]		@ save the "real" r0 copied
					@ from the exception stack

#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
	@ make sure our user space atomic helper is aborted
	cmp	r2, #TASK_SIZE
	bichs	r3, r3, #PSR_Z_BIT
#endif

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - spsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
	stmia	r0, {r2 - r4}
	stmdb	r0, {sp, lr}^

	@
	@ Enable the alignment trap while in kernel mode
	@
	alignment_trap r0

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp
	.endm

	.align	5
__dabt_usr:
	usr_entry

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context cpsr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.
	@
#ifdef MULTI_ABORT
	ldr	r4, .LCprocfns
	mov	lr, pc
	ldr	pc, [r4]
#else
	bl	CPU_ABORT_HANDLER
#endif

	@
	@ IRQs on, then call the main handler
	@
	enable_irq
	mov	r2, sp
	adr	lr, ret_from_exception
	b	do_DataAbort

	.align	5
__irq_usr:
	usr_entry

	get_thread_info tsk
#ifdef CONFIG_PREEMPT
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	add	r7, r8, #1			@ increment it
	str	r7, [tsk, #TI_PREEMPT]
#endif

	irq_handler
#ifdef CONFIG_PREEMPT
	ldr	r0, [tsk, #TI_PREEMPT]
	str	r8, [tsk, #TI_PREEMPT]
	teq	r0, r7
	strne	r0, [r0, -r0]
#endif

	mov	why, #0
	b	ret_to_user

	.ltorg

	.align	5
__und_usr:
	usr_entry

	tst	r3, #PSR_T_BIT			@ Thumb mode?
	bne	fpundefinstr			@ ignore FP
	sub	r4, r2, #4

	@
	@ fall through to the emulation code, which returns using r9 if
	@ it has emulated the instruction, or the more conventional lr
	@ if we are to treat this as a real undefined instruction
	@
	@  r0 - instruction
	@
1:	ldrt	r0, [r4]
	adr	r9, ret_from_exception
	adr	lr, fpundefinstr
	@
	@ fallthrough to call_fpe
	@

/*
 * The out of line fixup for the ldrt above.
 */
	.section .fixup, "ax"
2:	mov	pc, r9
	.previous
	.section __ex_table,"a"
	.long	1b, 2b
	.previous

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode.
 *  r2  = PC+4
 *  r10 = this thread's thread_info structure.
 */
call_fpe:
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
	and	r8, r0, #0x0f000000		@ mask out op-code bits
	teqne	r8, #0x0f000000			@ SWI (ARM6/7 bug)?
#endif
	moveq	pc, lr
	get_thread_info r10			@ get current thread
	and	r8, r0, #0x00000f00		@ mask out CP number
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
	strb	r7, [r6, r8, lsr #8]		@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
	enable_irq
	add	pc, pc, r8, lsr #6	@ jump into the table below; PC reads
	mov	r0, r0			@ as .+8, so this nop is never executed

	mov	pc, lr				@ CP#0
	b	do_fpe				@ CP#1 (FPE)
	b	do_fpe				@ CP#2 (FPE)
	mov	pc, lr				@ CP#3
	mov	pc, lr				@ CP#4
	mov	pc, lr				@ CP#5
	mov	pc, lr				@ CP#6
	mov	pc, lr				@ CP#7
	mov	pc, lr				@ CP#8
	mov	pc, lr				@ CP#9
#ifdef CONFIG_VFP
	b	do_vfp				@ CP#10 (VFP)
	b	do_vfp				@ CP#11 (VFP)
#else
	mov	pc, lr				@ CP#10 (VFP)
	mov	pc, lr				@ CP#11 (VFP)
#endif
	mov	pc, lr				@ CP#12
	mov	pc, lr				@ CP#13
	mov	pc, lr				@ CP#14 (Debug)
	mov	pc, lr				@ CP#15 (Control)

do_fpe:
	ldr	r4, .LCfp
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

	.data
ENTRY(fp_enter)
	.word	fpundefinstr
	.text

fpundefinstr:
	mov	r0, sp
	adr	lr, ret_from_exception
	b	do_undefinstr

	.align	5
__pabt_usr:
	usr_entry

	enable_irq				@ Enable interrupts
	mov	r0, r2				@ address (pc)
	mov	r1, sp				@ regs
	bl	do_PrefetchAbort		@ call abort handler
	/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
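/*
 * A sketch of how this is reached (illustrative only, not the exact
 * macro body): the scheduler's switch_to() macro passes the outgoing
 * task plus both thread_info pointers, and the previously-running task
 * is returned in r0:
 *
 *	last = __switch_to(prev, prev_thread_info, next_thread_info);
 */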
ENTRY(__switch_to)
	add	ip, r1, #TI_CPU_SAVE
	ldr	r3, [r2, #TI_TP_VALUE]
	stmia	ip!, {r4 - sl, fp, sp, lr}	@ Store most regs on stack
#ifndef CONFIG_MMU
	add	r2, r2, #TI_CPU_DOMAIN
#else
	ldr	r6, [r2, #TI_CPU_DOMAIN]!
#endif
#if __LINUX_ARM_ARCH__ >= 6
#ifdef CONFIG_CPU_MPCORE
	clrex
#else
	strex	r5, r4, [ip]			@ Clear exclusive monitor
#endif
#endif
#if defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_IWMMXT)
	mra	r4, r5, acc0
	stmia   ip, {r4, r5}
#endif
#if defined(CONFIG_HAS_TLS_REG)
	mcr	p15, 0, r3, c13, c0, 3		@ set TLS register
#elif !defined(CONFIG_TLS_REG_EMUL)
	mov	r4, #0xffff0fff
	str	r3, [r4, #-15]			@ TLS val at 0xffff0ff0
#endif
#ifdef CONFIG_MMU
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
#ifdef CONFIG_VFP
	@ Always disable VFP so we can lazily save/restore the old
	@ state. This occurs in the context of the previous thread.
	VFPFMRX	r4, FPEXC
	bic	r4, r4, #FPEXC_ENABLE
	VFPFMXR	FPEXC, r4
#endif
#if defined(CONFIG_IWMMXT)
	bl	iwmmxt_task_switch
#elif defined(CONFIG_CPU_XSCALE)
	add	r4, r2, #40			@ cpu_context_save->extra
	ldmib	r4, {r4, r5}
	mar	acc0, r4, r5
#endif
	ldmib	r2, {r4 - sl, fp, sp, pc}	@ Load all regs saved previously

	__INIT

/*
 * User helpers.
 *
 * These are segments of kernel-provided user code reachable from user
 * space at a fixed address in kernel memory.  This is used to provide
 * user space with some operations which require kernel help because of
 * unimplemented native features and/or instructions in many ARM CPUs.
 * The idea is for this code to be executed directly in user mode for
 * best efficiency, but it is too intimate with the kernel counterpart
 * to be left to user libraries.  In fact this code might even differ
 * from one CPU to another depending on the available instruction set,
 * or on restrictions such as those found on SMP systems.  In other
 * words, the kernel reserves the right to change
 * this code as needed without warning. Only the entry points and their
 * results are guaranteed to be stable.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * User space is expected to implement those things inline when optimizing
 * for a processor that has the necessary native support, but only if such
 * resulting binaries are already going to be incompatible with earlier
 * ARM processors due to the use of unsupported instructions other than
 * what is provided here.  In other words, don't make binaries unable to
 * run on earlier processors just for the sake of not using these kernel
 * helpers if your compiled code is not going to use the new instructions
 * for any other purpose.
 */
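/*
 * For example (an illustrative sketch only, not an additional ABI
 * guarantee), user space typically reaches a helper through a function
 * pointer built from its fixed address; calling __kernel_memory_barrier
 * by hand could look like:
 *
 *	typedef void (__kuser_fn_t)(void);
 *	(*(__kuser_fn_t *)0xffff0fa0)();
 */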

	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:

/*
 * Reference prototype:
 *
 *	void __kernel_memory_barrier(void)
 *
 * Input:
 *
 *	lr = return address
 *
 * Output:
 *
 *	none
 *
 * Clobbered:
 *
 *	the Z flag might be lost
 *
 * Definition and user space usage example:
 *
 *	typedef void (__kernel_dmb_t)(void);
 *	#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
 *
 * Apply any needed memory barrier to preserve consistency with data modified
 * manually and __kuser_cmpxchg usage.
 *
 * This could be used as follows:
 *
 * #define __kernel_dmb() \
 *         asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
 *	        : : : "r0", "lr","cc" )
 */

__kuser_memory_barrier:				@ 0xffff0fa0

#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_SMP)
	mcr	p15, 0, r0, c7, c10, 5	@ dmb
#endif
	mov	pc, lr

	.align	5

/*
 * Reference prototype:
 *
 *	int __kernel_cmpxchg(int oldval, int newval, int *ptr)
 *
 * Input:
 *
 *	r0 = oldval
 *	r1 = newval
 *	r2 = ptr
 *	lr = return address
 *
 * Output:
 *
 *	r0 = returned value (zero or non-zero)
 *	C flag = set if r0 == 0, clear if r0 != 0
 *
 * Clobbered:
 *
 *	r3, ip, flags
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
 *	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
 * Return zero if *ptr was changed or non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed to allow for assembly
 * optimization in the calling code.
 *
 *
 * Note: this routine already includes memory barriers as needed.
 *
 * For example, a user space atomic_add implementation could look like this:
 *
 * #define atomic_add(ptr, val) \
 *	({ register unsigned int *__ptr asm("r2") = (ptr); \
 *	   register unsigned int __result asm("r1"); \
 *	   asm volatile ( \
 *	       "1: @ atomic_add\n\t" \
 *	       "ldr	r0, [r2]\n\t" \
 *	       "mov	r3, #0xffff0fff\n\t" \
 *	       "add	lr, pc, #4\n\t" \
 *	       "add	r1, r0, %2\n\t" \
 *	       "add	pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
 *	       "bcc	1b" \
 *	       : "=&r" (__result) \
 *	       : "r" (__ptr), "rIL" (val) \
 *	       : "r0","r3","ip","lr","cc","memory" ); \
 *	   __result; })
 */

__kuser_cmpxchg:				@ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	swi	#0x9ffff0
	mov	pc, lr

#elif __LINUX_ARM_ARCH__ < 6

	/*
	 * Theory of operation:
	 *
	 * We set the Z flag before loading oldval. If ever an exception
	 * occurs we can not be sure the loaded value will still be the same
	 * when the exception returns, therefore the user exception handler
	 * will clear the Z flag whenever the interrupted user code was
	 * actually from the kernel address space (see the usr_entry macro).
	 *
	 * The post-increment on the str is used to prevent a race with an
	 * exception happening just after the str instruction which would
	 * clear the Z flag although the exchange was done.
	 */
	teq	ip, ip			@ set Z flag
	ldr	ip, [r2]		@ load current val
	add	r3, r2, #1		@ prepare store ptr
	teqeq	ip, r0			@ compare with oldval if still allowed
	streq	r1, [r3, #-1]!		@ store newval if still allowed
	subs	r0, r2, r3		@ if r2 == r3 the str occurred
	mov	pc, lr

#else

#ifdef CONFIG_SMP
	mcr	p15, 0, r0, c7, c10, 5	@ dmb
#endif
	ldrex	r3, [r2]
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
	rsbs	r0, r3, #0
#ifdef CONFIG_SMP
	mcr	p15, 0, r0, c7, c10, 5	@ dmb
#endif
	mov	pc, lr

#endif

	.align	5

/*
 * Reference prototype:
 *
 *	int __kernel_get_tls(void)
 *
 * Input:
 *
 *	lr = return address
 *
 * Output:
 *
 *	r0 = TLS value
 *
 * Clobbered:
 *
 *	the Z flag might be lost
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_get_tls_t)(void);
 *	#define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
 *
 * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
 *
 * This could be used as follows:
 *
 * #define __kernel_get_tls() \
 *	({ register unsigned int __val asm("r0"); \
 *         asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
 *	        : "=r" (__val) : : "lr","cc" ); \
 *	   __val; })
 */

__kuser_get_tls:				@ 0xffff0fe0

#if !defined(CONFIG_HAS_TLS_REG) && !defined(CONFIG_TLS_REG_EMUL)

	ldr	r0, [pc, #(16 - 8)]		@ TLS stored at 0xffff0ff0
	mov	pc, lr

#else

	mrc	p15, 0, r0, c13, c0, 3		@ read TLS register
	mov	pc, lr

#endif

	.rep	5
	.word	0			@ pad up to __kuser_helper_version
	.endr

/*
 * Reference declaration:
 *
 *	extern unsigned int __kernel_helper_version;
 *
 * Definition and user space usage example:
 *
 *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
 *
 * User space may read this to determine the current number of helpers
 * available.
 */
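/*
 * A minimal sketch of such a check, where the threshold value and the
 * fallback routine are illustrative only:
 *
 *	if (__kernel_helper_version < 2)
 *		use_fallback_path();
 */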

__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:


/*
 * Vector stubs.
 *
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not
 * exceed 0x300 bytes.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
	.macro	vector_stub, name, mode, correction=0
	.align	5

vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	@ (parent CPSR)
	@
	stmia	sp, {r0, lr}		@ save r0, lr
	mrs	lr, spsr
	str	lr, [sp, #8]		@ save spsr

	@
	@ Prepare for SVC32 mode.  IRQs remain disabled.
	@
	mrs	r0, cpsr
	eor	r0, r0, #(\mode ^ SVC_MODE)
	msr	spsr_cxsf, r0

	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f
	mov	r0, sp
	ldr	lr, [pc, lr, lsl #2]
	movs	pc, lr			@ branch to handler in SVC mode
	.endm

	.globl	__stubs_start
__stubs_start:
/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0 (USR_26 / USR_32)
	.long	__pabt_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3 (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0 (USR_26 / USR_32)
	.long	__und_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3 (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f

	.align	5

/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
vector_fiq:
	disable_fiq
	subs	pc, lr, #4

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
	b	vector_addrexcptn

/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
	.align	5

.LCvswi:
	.word	vector_swi

	.globl	__stubs_end
__stubs_end:

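/*
 * The vector entries below are assembled relative to __vectors_start but
 * execute at 0xffff0000, while the stubs execute at 0xffff0200.  Adding
 * stubs_offset to each branch target is the link-time correction that
 * makes those PC-relative branches land on the relocated stubs.
 */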
	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start

	.globl	__vectors_start
__vectors_start:
	swi	SYS_ERROR0
	b	vector_und + stubs_offset
	ldr	pc, .LCvswi + stubs_offset
	b	vector_pabt + stubs_offset
	b	vector_dabt + stubs_offset
	b	vector_addrexcptn + stubs_offset
	b	vector_irq + stubs_offset
	b	vector_fiq + stubs_offset

	.globl	__vectors_end
__vectors_end:

	.data

	.globl	cr_alignment
	.globl	cr_no_alignment
cr_alignment:
	.space	4
cr_no_alignment:
	.space	4