entry-common.S 14.7 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7 8 9 10 11
/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/unistd.h>
12
#include <asm/ftrace.h>
13
#include <asm/unwind.h>
L
Linus Torvalds 已提交
14

15 16 17 18 19 20 21
#ifdef CONFIG_NEED_RET_TO_USER
#include <mach/entry-macro.S>
#else
@ Default no-op: platforms that do not select CONFIG_NEED_RET_TO_USER need no
@ machine-specific work on the return-to-user path.  tmp1/tmp2 are scratch
@ registers a machine-provided version may clobber.
	.macro  arch_ret_to_user, tmp1, tmp2
	.endm
#endif

L
Linus Torvalds 已提交
22 23 24 25 26 27 28 29 30 31
#include "entry-header.S"


	.align	5
/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving r0 back into the SVC
 * stack.
 */
ret_fast_syscall:
32 33
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
34
	disable_irq				@ disable interrupts
L
Linus Torvalds 已提交
35 36 37
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check work flags with IRQs off
	tst	r1, #_TIF_WORK_MASK		@ signal/resched/notify pending?
	bne	fast_work_pending		@ yes: fall into the slow path
38 39 40
#if defined(CONFIG_IRQSOFF_TRACER)
	asm_trace_hardirqs_on			@ IRQs become enabled on user return
#endif
41

42 43 44
	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

45
	restore_user_regs fast = 1, offset = S_OFF
46
 UNWIND(.fnend		)
L
Linus Torvalds 已提交
47 48 49 50 51 52 53 54 55

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
work_pending:
	@ On entry r1 holds the thread's TI_FLAGS.
	tst	r1, #_TIF_NEED_RESCHED
	bne	work_resched
56
	tst	r1, #_TIF_SIGPENDING|_TIF_NOTIFY_RESUME
L
Linus Torvalds 已提交
57 58 59
	beq	no_work_pending
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
60 61
	tst	r1, #_TIF_SIGPENDING		@ delivering a signal?
	movne	why, #0				@ prevent further restarts
L
Linus Torvalds 已提交
62
	bl	do_notify_resume
63
	b	ret_slow_syscall		@ Check work again
L
Linus Torvalds 已提交
64 65 66 67 68 69 70 71

work_resched:
	bl	schedule
/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
72
	disable_irq				@ disable interrupts
73
ENTRY(ret_to_user_from_irq)
L
Linus Torvalds 已提交
74 75 76 77
	ldr	r1, [tsk, #TI_FLAGS]		@ any work left before user return?
	tst	r1, #_TIF_WORK_MASK
	bne	work_pending
no_work_pending:
78 79 80
#if defined(CONFIG_IRQSOFF_TRACER)
	asm_trace_hardirqs_on			@ IRQs become enabled on user return
#endif
81 82 83
	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

84
	restore_user_regs fast = 0, offset = 0
85
ENDPROC(ret_to_user_from_irq)
86
ENDPROC(ret_to_user)
L
Linus Torvalds 已提交
87 88 89 90 91 92 93 94 95

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	get_thread_info tsk
	ldr	r1, [tsk, #TI_FLAGS]		@ check for syscall tracing
	mov	why, #1				@ mark this as a real syscall return
96
	tst	r1, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
L
Linus Torvalds 已提交
97 98 99 100 101
	beq	ret_slow_syscall
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall
102
ENDPROC(ret_from_fork)
L
Linus Torvalds 已提交
103

104 105
@ First pass over calls.S: make each CALL() entry bump NR_syscalls by one,
@ so NR_syscalls ends up as the number of table entries.
	.equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
L
Linus Torvalds 已提交
106
#include "calls.S"
107 108
@ Redefine CALL() so later inclusions of calls.S emit the actual table words.
#undef CALL
#define CALL(x) .long x
L
Linus Torvalds 已提交
109

110
#ifdef CONFIG_FUNCTION_TRACER
111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145
/*
 * When compiling with -pg, gcc inserts a call to the mcount routine at the
 * start of every function.  In mcount, apart from the function's address (in
 * lr), we need to get hold of the function's caller's address.
 *
 * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
 *
 *	bl	mcount
 *
 * These versions have the limitation that in order for the mcount routine to
 * be able to determine the function's caller's address, an APCS-style frame
 * pointer (which is set up with something like the code below) is required.
 *
 *	mov     ip, sp
 *	push    {fp, ip, lr, pc}
 *	sub     fp, ip, #4
 *
 * With EABI, these frame pointers are not available unless -mapcs-frame is
 * specified, and if building as Thumb-2, not even then.
 *
 * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
 * with call sites like:
 *
 *	push	{lr}
 *	bl	__gnu_mcount_nc
 *
 * With these compilers, frame pointers are not necessary.
 *
 * mcount can be thought of as a function called in the middle of a subroutine
 * call.  As such, it needs to be transparent for both the caller and the
 * callee: the original lr needs to be restored when leaving mcount, and no
 * registers should be clobbered.  (In the __gnu_mcount_nc implementation, we
 * clobber the ip register.  This is OK because the ARM calling convention
 * allows it to be clobbered in subroutines and doesn't use it to hold
 * parameters.)
146 147 148 149
 *
 * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
 * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
 * arch/arm/kernel/ftrace.c).
150
 */
151 152 153 154 155 156 157

#ifndef CONFIG_OLD_MCOUNT
#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
#endif
#endif

158 159 160 161 162
@ rd := rn converted to the instrumented function's address: strip a possible
@ Thumb bit and step back over the mcount call instruction.
.macro mcount_adjust_addr rd, rn
	bic	\rd, \rn, #1		@ clear the Thumb bit if present
	sub	\rd, \rd, #MCOUNT_INSN_SIZE
.endm

163 164 165 166 167 168 169
@ Common non-dynamic mcount body.  Relies on mcount_enter/mcount_get_lr/
@ mcount_exit macros defined (per ABI flavour) before each use.
.macro __mcount suffix
	mcount_enter
	ldr	r0, =ftrace_trace_function
	ldr	r2, [r0]
	adr	r0, .Lftrace_stub
	cmp	r0, r2			@ tracer installed?  (stub means "none")
	bne	1f
170

171 172 173 174 175 176 177 178 179 180 181 182 183
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	ldr     r1, =ftrace_graph_return
	ldr     r2, [r1]
	cmp     r0, r2
	bne     ftrace_graph_caller\suffix

	ldr     r1, =ftrace_graph_entry
	ldr     r2, [r1]
	ldr     r0, =ftrace_graph_entry_stub
	cmp     r0, r2
	bne     ftrace_graph_caller\suffix
#endif

184
	mcount_exit
185

186
1: 	mcount_get_lr	r1			@ lr of instrumented func
187
	mcount_adjust_addr	r0, lr		@ instrumented function
188 189 190 191
	adr	lr, BSYM(2f)
	mov	pc, r2			@ indirect call into the tracer
2:	mcount_exit
.endm
A
Abhishek Sagar 已提交
192

193 194
@ Dynamic-ftrace entry body: ftrace_call\suffix / ftrace_graph_call\suffix are
@ global labels so the kernel can patch the instructions at those sites
@ at runtime.
.macro __ftrace_caller suffix
	mcount_enter
A
Abhishek Sagar 已提交
195

196
	mcount_get_lr	r1			@ lr of instrumented func
197
	mcount_adjust_addr	r0, lr		@ instrumented function
198 199 200

	.globl ftrace_call\suffix
ftrace_call\suffix:
201
	bl	ftrace_stub
202

203 204 205 206 207 208
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl ftrace_graph_call\suffix
ftrace_graph_call\suffix:
	mov	r0, r0			@ nop placeholder, patched at runtime
#endif

209 210
	mcount_exit
.endm
A
Abhishek Sagar 已提交
211

212 213
@ Hand off to the function-graph tracer: r0 = &parent-lr slot, r1 = address
@ of the instrumented function, r2 = frame pointer.
.macro __ftrace_graph_caller
	sub	r0, fp, #4		@ &lr of instrumented routine (&parent)
214 215 216
#ifdef CONFIG_DYNAMIC_FTRACE
	@ called from __ftrace_caller, saved in mcount_enter
	ldr	r1, [sp, #16]		@ instrumented routine (func)
217
	mcount_adjust_addr	r1, r1
218 219
#else
	@ called from __mcount, untouched in lr
220
	mcount_adjust_addr	r1, lr	@ instrumented routine (func)
221
#endif
222 223 224 225
	mov	r2, fp			@ frame pointer
	bl	prepare_ftrace_return
	mcount_exit
.endm
A
Abhishek Sagar 已提交
226

227
#ifdef CONFIG_OLD_MCOUNT
228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244
/*
 * mcount
 */

@ Old (pre-4.4 GCC) mcount flavour: caller set up an APCS frame, so the
@ instrumented function's lr lives at [fp, #-4].
.macro mcount_enter
	stmdb	sp!, {r0-r3, lr}
.endm

.macro mcount_get_lr reg
	ldr	\reg, [fp, #-4]
.endm

.macro mcount_exit
	ldr	lr, [fp, #-4]
	ldmia	sp!, {r0-r3, pc}
.endm

245
ENTRY(mcount)
246
#ifdef CONFIG_DYNAMIC_FTRACE
247 248 249
	@ Dynamic ftrace patches call sites directly; the plain mcount entry
	@ just restores lr from the APCS frame and returns.
	stmdb	sp!, {lr}
	ldr	lr, [fp, #-4]
	ldmia	sp!, {pc}
250 251 252
#else
	__mcount _old
#endif
253
ENDPROC(mcount)
A
Abhishek Sagar 已提交
254

255
#ifdef CONFIG_DYNAMIC_FTRACE
256
@ Patched-in entry point for old-ABI mcount call sites.
ENTRY(ftrace_caller_old)
257
	__ftrace_caller _old
258 259
ENDPROC(ftrace_caller_old)
#endif
A
Abhishek Sagar 已提交
260

261 262 263 264 265
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller_old)
	__ftrace_graph_caller
ENDPROC(ftrace_graph_caller_old)
#endif
A
Abhishek Sagar 已提交
266

267 268 269 270
@ Drop the old-ABI helper macros so the __gnu_mcount_nc flavour can redefine
@ them below.
.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit
#endif
A
Abhishek Sagar 已提交
271

272 273 274 275 276
/*
 * __gnu_mcount_nc
 */

@ GCC 4.4+ flavour: the caller pushed {lr} before calling __gnu_mcount_nc,
@ so mcount_enter leaves the original lr at [sp, #20] and mcount_exit pops
@ both saved words, returning via ip (which this ABI may clobber).
.macro mcount_enter
277
	stmdb	sp!, {r0-r3, lr}
278 279 280 281 282 283 284
.endm

.macro mcount_get_lr reg
	ldr	\reg, [sp, #20]
.endm

.macro mcount_exit
285 286
	ldmia	sp!, {r0-r3, ip, lr}
	mov	pc, ip
287
.endm
288

289 290 291 292
ENTRY(__gnu_mcount_nc)
#ifdef CONFIG_DYNAMIC_FTRACE
	@ Call sites are patched directly; just undo the caller's push {lr}
	@ and return through ip.
	mov	ip, lr
	ldmia	sp!, {lr}
293
	mov	pc, ip
294 295 296
#else
	__mcount
#endif
297
ENDPROC(__gnu_mcount_nc)
298

299 300 301 302
#ifdef CONFIG_DYNAMIC_FTRACE
@ Patched-in entry point for __gnu_mcount_nc call sites (no suffix).
ENTRY(ftrace_caller)
	__ftrace_caller
ENDPROC(ftrace_caller)
303
#endif
A
Abhishek Sagar 已提交
304

305 306 307 308
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	__ftrace_graph_caller
ENDPROC(ftrace_graph_caller)
309
#endif
A
Abhishek Sagar 已提交
310

311 312 313
.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit
A
Abhishek Sagar 已提交
314

315 316 317 318 319 320 321 322 323 324
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@ Trampoline the graph tracer substitutes for a function's return address;
@ asks ftrace_return_to_handler for the real one and jumps there.
	.globl return_to_handler
return_to_handler:
	stmdb	sp!, {r0-r3}		@ preserve the function's return values
	mov	r0, fp			@ frame pointer
	bl	ftrace_return_to_handler
	mov	lr, r0			@ r0 has real ret addr
	ldmia	sp!, {r0-r3}
	mov	pc, lr
#endif
A
Abhishek Sagar 已提交
325

326
@ Default "no tracer" target; __mcount compares against .Lftrace_stub above.
ENTRY(ftrace_stub)
327
.Lftrace_stub:
328
	mov	pc, lr
329
ENDPROC(ftrace_stub)
A
Abhishek Sagar 已提交
330

331
#endif /* CONFIG_FUNCTION_TRACER */
A
Abhishek Sagar 已提交
332

L
Linus Torvalds 已提交
333 334 335 336 337 338 339 340 341
/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	/* If we're optimising for StrongARM the resulting code won't 
	   run on an ARM7 and we can save a couple of instructions.  
								--pb */
#ifdef CONFIG_CPU_ARM710
342 343
@ A710() wraps instructions only needed for the ARM710 SWI erratum; on other
@ CPUs it expands to nothing (see the #else branch).
#define A710(code...) code
.Larm710bug:
L
Linus Torvalds 已提交
344 345 346
	@ Not a real SWI: restore the user registers and retry the
	@ faulting instruction.
	ldmia	sp, {r0 - lr}^			@ Get calling r0 - lr
	mov	r0, r0
	add	sp, sp, #S_FRAME_SIZE
347
	subs	pc, lr, #4
L
Linus Torvalds 已提交
348
#else
349
#define A710(code...)
L
Linus Torvalds 已提交
350 351 352 353
#endif

	.align	5
@ SWI/syscall entry point: build a pt_regs frame on the SVC stack, extract
@ the syscall number (ABI-dependent), then dispatch through the syscall table.
ENTRY(vector_swi)
354 355
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
356 357 358 359
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
360 361 362 363
	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
	str	lr, [sp, #S_PC]			@ Save calling PC
	str	r8, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
L
Linus Torvalds 已提交
364
	zero_fp
365 366 367 368

	/*
	 * Get the system call number.
	 */
369

370
#if defined(CONFIG_OABI_COMPAT)
371

372 373 374 375 376 377 378 379 380 381 382 383 384 385
	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine if it is an EABI or an old ABI call.
	 */
#ifdef CONFIG_ARM_THUMB
	tst	r8, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
	ldreq	r10, [lr, #-4]			@ get SWI instruction
#else
	ldr	r10, [lr, #-4]			@ get SWI instruction
  A710(	and	ip, r10, #0x0f000000		@ check for SWI		)
  A710(	teq	ip, #0x0f000000						)
  A710(	bne	.Larm710bug						)
#endif
386 387 388
#ifdef CONFIG_CPU_ENDIAN_BE8
	rev	r10, r10			@ little endian instruction
#endif
389 390 391 392 393 394

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always put syscall number into scno (r7).
	 */
395 396 397 398
  A710(	ldr	ip, [lr, #-4]			@ get SWI instruction	)
  A710(	and	ip, ip, #0x0f000000		@ check for SWI		)
  A710(	teq	ip, #0x0f000000						)
  A710(	bne	.Larm710bug						)
399

400
#elif defined(CONFIG_ARM_THUMB)
401 402

	/* Legacy ABI only, possibly thumb mode. */
403 404 405
	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
	ldreq	scno, [lr, #-4]
406

407
#else
408 409

	/* Legacy ABI only. */
410
	ldr	scno, [lr, #-4]			@ get SWI instruction
411 412 413
  A710(	and	ip, scno, #0x0f000000		@ check for SWI		)
  A710(	teq	ip, #0x0f000000						)
  A710(	bne	.Larm710bug						)
414

415
#endif
L
Linus Torvalds 已提交
416 417 418 419 420 421

#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	ip, __cr_alignment
	ldr	ip, [ip]
	mcr	p15, 0, ip, c1, c0		@ update control register
#endif
422
	enable_irq
L
Linus Torvalds 已提交
423 424

	get_thread_info tsk
425 426 427 428 429 430 431 432 433 434 435 436 437
	adr	tbl, sys_call_table		@ load syscall table pointer

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
	bics	r10, r10, #0xff000000
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
L
Linus Torvalds 已提交
438
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
439
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
440
#endif
441

N
Nicolas Pitre 已提交
442
	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
443
	stmdb	sp!, {r4, r5}			@ push fifth and sixth args
N
Nicolas Pitre 已提交
444 445 446 447 448 449 450 451 452 453 454

#ifdef CONFIG_SECCOMP
	tst	r10, #_TIF_SECCOMP
	beq	1f
	mov	r0, scno
	bl	__secure_computing	
	add	r0, sp, #S_R0 + S_OFF		@ pointer to regs
	ldmia	r0, {r0 - r3}			@ have to reload r0 - r3
1:
#endif

455
	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
L
Linus Torvalds 已提交
456 457 458
	bne	__sys_trace

	cmp	scno, #NR_syscalls		@ check upper syscall limit
459
	adr	lr, BSYM(ret_fast_syscall)	@ return address
L
Linus Torvalds 已提交
460 461 462 463
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine

	@ Out-of-range syscall number: either an ARM-private call or ENOSYS.
	add	r1, sp, #S_OFF
2:	mov	why, #0				@ no longer a real syscall
464 465
	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
L
Linus Torvalds 已提交
466 467
	bcs	arm_syscall	
	b	sys_ni_syscall			@ not private func
468
ENDPROC(vector_swi)
L
Linus Torvalds 已提交
469 470 471 472 473 474

	/*
	 * This is the really slow path.  We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 */
__sys_trace:
475
	mov	r2, scno
L
Linus Torvalds 已提交
476 477 478 479
	add	r1, sp, #S_OFF
	mov	r0, #0				@ trace entry [IP = 0]
	bl	syscall_trace

480
	adr	lr, BSYM(__sys_trace_return)	@ return address
481
	mov	scno, r0			@ syscall number (possibly new)
L
Linus Torvalds 已提交
482 483 484 485 486 487 488 489
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldmccia	r1, {r0 - r3}			@ have to reload r0 - r3
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
	b	2b

@ Traced syscall has returned: report the exit to the tracer, then take the
@ normal slow return path.
__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
490
	mov	r2, scno
L
Linus Torvalds 已提交
491 492 493 494 495 496 497 498 499 500
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall

	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
@ Literal holding the address of cr_alignment, loaded by vector_swi to
@ refresh the CP15 control register.
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
501 502 503 504 505 506 507 508 509 510 511 512
#endif
	.ltorg

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple syscalls are obsolete and defined as sys_ni_syscall.
 */
#define ABI(native, compat) native
#ifdef CONFIG_AEABI
#define OBSOLETE(syscall) sys_ni_syscall
#else
#define OBSOLETE(syscall) syscall
L
Linus Torvalds 已提交
513 514 515 516 517
#endif

	.type	sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
518 519
#undef ABI
#undef OBSOLETE
L
Linus Torvalds 已提交
520 521 522 523 524

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
525
@ r8 = syscall table
L
Linus Torvalds 已提交
526
@ sys_syscall(nr, a1..a5): indirect syscall — shift the arguments down one
@ register slot and dispatch nr through the table (rejecting recursion into
@ sys_syscall itself and out-of-range numbers).
sys_syscall:
527
		bic	scno, r0, #__NR_OABI_SYSCALL_BASE
L
Linus Torvalds 已提交
528 529 530 531 532 533 534 535 536
		cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
		cmpne	scno, #NR_syscalls	@ check range
		stmloia	sp, {r5, r6}		@ shuffle args
		movlo	r0, r1
		movlo	r1, r2
		movlo	r2, r3
		movlo	r3, r4
		ldrlo	pc, [tbl, scno, lsl #2]
		b	sys_ni_syscall
537
ENDPROC(sys_syscall)
L
Linus Torvalds 已提交
538 539 540 541

@ The wrappers below pass a pointer into the saved pt_regs frame as an extra
@ argument to the C syscall implementations.
sys_fork_wrapper:
		add	r0, sp, #S_OFF
		b	sys_fork
542
ENDPROC(sys_fork_wrapper)
L
Linus Torvalds 已提交
543 544 545 546

sys_vfork_wrapper:
		add	r0, sp, #S_OFF
		b	sys_vfork
547
ENDPROC(sys_vfork_wrapper)
L
Linus Torvalds 已提交
548 549 550 551

sys_execve_wrapper:
		add	r3, sp, #S_OFF
		b	sys_execve
552
ENDPROC(sys_execve_wrapper)
L
Linus Torvalds 已提交
553 554 555 556 557

sys_clone_wrapper:
		add	ip, sp, #S_OFF
		str	ip, [sp, #4]
		b	sys_clone
558
ENDPROC(sys_clone_wrapper)
L
Linus Torvalds 已提交
559 560 561

sys_sigreturn_wrapper:
		add	r0, sp, #S_OFF
A
Al Viro 已提交
562
		mov	why, #0		@ prevent syscall restart handling
L
Linus Torvalds 已提交
563
		b	sys_sigreturn
564
ENDPROC(sys_sigreturn_wrapper)
L
Linus Torvalds 已提交
565 566 567

sys_rt_sigreturn_wrapper:
		add	r0, sp, #S_OFF
A
Al Viro 已提交
568
		mov	why, #0		@ prevent syscall restart handling
L
Linus Torvalds 已提交
569
		b	sys_rt_sigreturn
570
ENDPROC(sys_rt_sigreturn_wrapper)
L
Linus Torvalds 已提交
571 572 573 574

sys_sigaltstack_wrapper:
		ldr	r2, [sp, #S_OFF + S_SP]
		b	do_sigaltstack
575
ENDPROC(sys_sigaltstack_wrapper)
L
Linus Torvalds 已提交
576

@ Rewrite a size argument of 88 to 84 before calling the generic
@ implementation (presumably an OABI vs. EABI struct-size fixup — confirm
@ against sys_oabi-compat usage).
577 578 579 580
sys_statfs64_wrapper:
		teq	r1, #88
		moveq	r1, #84
		b	sys_statfs64
581
ENDPROC(sys_statfs64_wrapper)
582 583 584 585 586

sys_fstatfs64_wrapper:
		teq	r1, #88
		moveq	r1, #84
		b	sys_fstatfs64
587
ENDPROC(sys_fstatfs64_wrapper)
588

L
Linus Torvalds 已提交
589 590 591 592 593 594 595 596 597
/*
 * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
 * offset, we return EINVAL.
 */
sys_mmap2:
#if PAGE_SHIFT > 12
		@ Convert the 4K-unit offset to page units; any remainder bits
		@ mean the offset is not representable -> -EINVAL.
		tst	r5, #PGOFF_MASK
		moveq	r5, r5, lsr #PAGE_SHIFT - 12
		streq	r5, [sp, #4]
A
Al Viro 已提交
598
		beq	sys_mmap_pgoff
L
Linus Torvalds 已提交
599
		mov	r0, #-EINVAL
R
Russell King 已提交
600
		mov	pc, lr
L
Linus Torvalds 已提交
601 602
#else
		@ 4K pages: the 4K-unit offset is already in page units.
		str	r5, [sp, #4]
A
Al Viro 已提交
603
		b	sys_mmap_pgoff
L
Linus Torvalds 已提交
604
#endif
605
ENDPROC(sys_mmap2)
606 607

#ifdef CONFIG_OABI_COMPAT
608

609 610 611 612 613 614 615
/*
 * These are syscalls with argument register differences
 */

@ OABI passes the 64-bit argument in {r3,r4}; spill it to the stack slots
@ where the EABI implementation expects it.
sys_oabi_pread64:
		stmia	sp, {r3, r4}
		b	sys_pread64
616
ENDPROC(sys_oabi_pread64)
617 618 619 620

sys_oabi_pwrite64:
		stmia	sp, {r3, r4}
		b	sys_pwrite64
621
ENDPROC(sys_oabi_pwrite64)
622 623 624 625 626

@ Shift the 64-bit length up one register pair for the EABI convention.
sys_oabi_truncate64:
		mov	r3, r2
		mov	r2, r1
		b	sys_truncate64
627
ENDPROC(sys_oabi_truncate64)
628 629 630 631 632

sys_oabi_ftruncate64:
		mov	r3, r2
		mov	r2, r1
		b	sys_ftruncate64
633
ENDPROC(sys_oabi_ftruncate64)
634 635 636 637 638 639

sys_oabi_readahead:
		str	r3, [sp]
		mov	r3, r2
		mov	r2, r1
		b	sys_readahead
640
ENDPROC(sys_oabi_readahead)
641

642 643 644 645 646 647 648 649 650 651 652 653 654
/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
#define ABI(native, compat) compat
#define OBSOLETE(syscall) syscall

	.type	sys_oabi_call_table, #object
ENTRY(sys_oabi_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

655 656
#endif