#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/thread_info.h>

@ Bad Abort numbers
@ -----------------
@
#define BAD_PREFETCH	0
#define BAD_DATA	1
#define BAD_ADDREXCPTN	2
#define BAD_IRQ		3
#define BAD_UNDEFINSTR	4

@
@ Most of the stack format comes from struct pt_regs, but with
@ the addition of 8 bytes for storing syscall args 5 and 6.
@ This _must_ remain a multiple of 8 for EABI.
@
#define S_OFF		8
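
@
@ As a rough sketch (assuming the vector_swi layout in entry-common.S),
@ the stack seen by the syscall handlers then looks like:
@
@	sp + 0 .. sp + 7	syscall args 5 and 6 (saved r4, r5)
@	sp + S_OFF		struct pt_regs (r0 - pc, cpsr, ORIG_r0)
@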

/* 
 * The SWI code relies on the fact that R0 is at the bottom of the stack
 * (due to slow/fast restore user regs).
 */
#if S_R0 != 0
#error "Please fix"
#endif

	.macro	zero_fp
#ifdef CONFIG_FRAME_POINTER
	mov	fp, #0
#endif
	.endm

	.macro	alignment_trap, rtemp
#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	\rtemp, .LCcralign		@ pointer to cr_alignment
	ldr	\rtemp, [\rtemp]		@ fetch saved control register value
	mcr	p15, 0, \rtemp, c1, c0		@ restore CP15 control register
#endif
	.endm

	@
	@ Store/load the USER SP and LR registers by switching to the SYS
	@ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
	@ available. SYS mode shares the User mode banked sp and lr, which
	@ is what makes this work. Should only be called from SVC mode.
	@
	.macro	store_user_sp_lr, rd, rtemp, offset = 0
	mrs	\rtemp, cpsr
	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch to the SYS mode

	str	sp, [\rd, #\offset]		@ save sp_usr
	str	lr, [\rd, #\offset + 4]		@ save lr_usr

	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
	.endm

	.macro	load_user_sp_lr, rd, rtemp, offset = 0
	mrs	\rtemp, cpsr
	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch to the SYS mode

	ldr	sp, [\rd, #\offset]		@ load sp_usr
	ldr	lr, [\rd, #\offset + 4]		@ load lr_usr

	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
	.endm
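
@
@ Illustrative use only (not taken from this file): with r0 pointing at
@ a struct pt_regs, the user sp/lr could be captured into the frame with:
@
@	store_user_sp_lr r0, r1, S_SP
@
@ The Thumb-2 restore_user_regs below uses load_user_sp_lr the same way.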

#ifndef CONFIG_THUMB2_KERNEL
	.macro	svc_exit, rpsr
	msr	spsr_cxsf, \rpsr
#if defined(CONFIG_CPU_32v6K)
	clrex					@ clear the exclusive monitor
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
#elif defined (CONFIG_CPU_V6)
	ldr	r0, [sp]			@ read r0 first: the strex below
						@ may overwrite the saved copy
	strex	r1, r2, [sp]			@ clear the exclusive monitor
	ldmib	sp, {r1 - pc}^			@ load r1 - pc, cpsr
#else
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
#endif
	.endm

	.macro	restore_user_regs, fast = 0, offset = 0
	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
	ldr	lr, [sp, #\offset + S_PC]!	@ get pc
	msr	spsr_cxsf, r1			@ save in spsr_svc
#if defined(CONFIG_CPU_32v6K)
	clrex					@ clear the exclusive monitor
#elif defined (CONFIG_CPU_V6)
	strex	r1, r2, [sp]			@ clear the exclusive monitor
#endif
	.if	\fast
	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
	.else
	ldmdb	sp, {r0 - lr}^			@ get calling r0 - lr
	.endif
	mov	r0, r0				@ ARMv5T and earlier require a nop
						@ after ldm {}^
	add	sp, sp, #S_FRAME_SIZE - S_PC
	movs	pc, lr				@ return & move spsr_svc into cpsr
	.endm

	.macro	get_thread_info, rd
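	@ thread_info sits at the base of the 8K (2^13 byte) kernel stack,
	@ so masking off the low 13 bits of sp locates it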
	mov	\rd, sp, lsr #13
	mov	\rd, \rd, lsl #13
	.endm

	@
	@ 32-bit wide "mov pc, reg"
	@
	.macro	movw_pc, reg
	mov	pc, \reg
	.endm
#else	/* CONFIG_THUMB2_KERNEL */
	.macro	svc_exit, rpsr
	clrex					@ clear the exclusive monitor
	ldr	r0, [sp, #S_SP]			@ top of the stack
	ldr	r1, [sp, #S_PC]			@ return address
	tst	r0, #4				@ orig stack 8-byte aligned?
	stmdb	r0, {r1, \rpsr}			@ rfe context
	ldmia	sp, {r0 - r12}
	ldr	lr, [sp, #S_LR]
	addeq	sp, sp, #S_FRAME_SIZE - 8	@ aligned
	addne	sp, sp, #S_FRAME_SIZE - 4	@ not aligned
	rfeia	sp!
	.endm

	.macro	restore_user_regs, fast = 0, offset = 0
	clrex					@ clear the exclusive monitor
	mov	r2, sp
	load_user_sp_lr r2, r3, \offset + S_SP	@ calling sp, lr
	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
	ldr	lr, [sp, #\offset + S_PC]	@ get pc
	add	sp, sp, #\offset + S_SP
	msr	spsr_cxsf, r1			@ save in spsr_svc
	.if	\fast
	ldmdb	sp, {r1 - r12}			@ get calling r1 - r12
	.else
	ldmdb	sp, {r0 - r12}			@ get calling r0 - r12
	.endif
	add	sp, sp, #S_FRAME_SIZE - S_SP
	movs	pc, lr				@ return & move spsr_svc into cpsr
	.endm

	.macro	get_thread_info, rd
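	@ same 8K-stack masking trick as the ARM version above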
	mov	\rd, sp
	lsr	\rd, \rd, #13
	mov	\rd, \rd, lsl #13
	.endm

	@
	@ 32-bit wide "mov pc, reg"
	@
	.macro	movw_pc, reg
	mov	pc, \reg
	nop
	.endm
#endif	/* !CONFIG_THUMB2_KERNEL */

	@
	@ Debug exceptions are taken as prefetch or data aborts.
	@ We must disable preemption during the handler so that
	@ we can access the debug registers safely.
	@
	.macro	debug_entry, fsr
#if defined(CONFIG_HAVE_HW_BREAKPOINT) && defined(CONFIG_PREEMPT)
	ldr	r4, =0x40f		@ mask out fsr.fs
	and	r5, r4, \fsr
	cmp	r5, #2			@ debug exception
	bne	1f
	get_thread_info r10
	ldr	r6, [r10, #TI_PREEMPT]	@ get preempt count
	add	r11, r6, #1		@ increment it
	str	r11, [r10, #TI_PREEMPT]
1:
#endif
	.endm

/*
 * These are the registers used in the syscall handler, and allow us to
 * have, in theory, up to 7 arguments to a function - r0 to r6.
 *
 * r7 is reserved for the system call number for Thumb mode.
 *
 * Note that tbl == why is intentional.
 *
 * We must set at least "tsk" and "why" when calling ret_with_reschedule.
 */
scno	.req	r7		@ syscall number
tbl	.req	r8		@ syscall table pointer
why	.req	r8		@ Linux syscall (!= 0)
tsk	.req	r9		@ current thread_info
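
/*
 * As an illustrative sketch (paraphrasing the dispatch code in
 * entry-common.S, not verbatim), the aliases get used along these lines:
 *
 *	adr	tbl, sys_call_table		@ load syscall table pointer
 *	cmp	scno, #NR_syscalls		@ range-check the number
 *	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
 *	mov	why, #0				@ no longer a syscall
 */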