/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>

#ifdef CONFIG_NEED_RET_TO_USER
#include <mach/entry-macro.S>
#else
	.macro  arch_ret_to_user, tmp1, tmp2
	.endm
#endif

#include "entry-header.S"


	.align	5
/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving r0 back into the SVC
 * stack.
 */
ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	disable_irq				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	fast_work_pending
#if defined(CONFIG_IRQSOFF_TRACER)
	asm_trace_hardirqs_on
#endif

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend		)

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
work_pending:
	tst	r1, #_TIF_NEED_RESCHED
	bne	work_resched
	/*
	 * TIF_SIGPENDING or TIF_NOTIFY_RESUME must've been set if we got here
	 */
	ldr	r2, [sp, #S_PSR]
	mov	r0, sp				@ 'regs'
	tst	r2, #15				@ are we returning to user mode?
	bne	no_work_pending			@ no?  just leave, then...
	mov	r2, why				@ 'syscall'
	tst	r1, #_TIF_SIGPENDING		@ delivering a signal?
	movne	why, #0				@ prevent further restarts
	bl	do_notify_resume
	b	ret_slow_syscall		@ Check work again

work_resched:
	bl	schedule
/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq				@ disable interrupts
ENTRY(ret_to_user_from_irq)
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	work_pending
no_work_pending:
#if defined(CONFIG_IRQSOFF_TRACER)
	asm_trace_hardirqs_on
#endif
	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	get_thread_info tsk
	mov	why, #1
	b	ret_slow_syscall
ENDPROC(ret_from_fork)

	.equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
#include "calls.S"
#undef CALL
#define CALL(x) .long x
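/*
 * calls.S has just been included with CALL() only counting entries, so
 * NR_syscalls now holds the table size; CALL() is redefined here so that
 * the later includes (sys_call_table and, with OABI compat,
 * sys_oabi_call_table) emit the actual pointers.
 */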

#ifdef CONFIG_FUNCTION_TRACER
/*
 * When compiling with -pg, gcc inserts a call to the mcount routine at the
 * start of every function.  In mcount, apart from the function's address (in
 * lr), we need to get hold of the function's caller's address.
 *
 * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
 *
 *	bl	mcount
 *
 * These versions have the limitation that in order for the mcount routine to
 * be able to determine the function's caller's address, an APCS-style frame
 * pointer (which is set up with something like the code below) is required.
 *
 *	mov     ip, sp
 *	push    {fp, ip, lr, pc}
 *	sub     fp, ip, #4
 *
 * With EABI, these frame pointers are not available unless -mapcs-frame is
 * specified, and if building as Thumb-2, not even then.
 *
 * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
 * with call sites like:
 *
 *	push	{lr}
 *	bl	__gnu_mcount_nc
 *
 * With these compilers, frame pointers are not necessary.
 *
 * mcount can be thought of as a function called in the middle of a subroutine
 * call.  As such, it needs to be transparent for both the caller and the
 * callee: the original lr needs to be restored when leaving mcount, and no
 * registers should be clobbered.  (In the __gnu_mcount_nc implementation, we
 * clobber the ip register.  This is OK because the ARM calling convention
 * allows it to be clobbered in subroutines and doesn't use it to hold
 * parameters.)
 *
 * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
 * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
 * arch/arm/kernel/ftrace.c).
 */
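/*
 * For illustration, a __gnu_mcount_nc call site that dynamic ftrace has
 * patched out is left looking like
 *
 *	push	{lr}
 *	pop	{lr}
 *
 * and is rewritten back to "bl __gnu_mcount_nc" when tracing is enabled.
 */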

#ifndef CONFIG_OLD_MCOUNT
#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
#endif
#endif

.macro mcount_adjust_addr rd, rn
	bic	\rd, \rn, #1		@ clear the Thumb bit if present
	sub	\rd, \rd, #MCOUNT_INSN_SIZE
.endm

.macro __mcount suffix
	mcount_enter
	ldr	r0, =ftrace_trace_function
	ldr	r2, [r0]
	adr	r0, .Lftrace_stub
	cmp	r0, r2
	bne	1f

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	ldr     r1, =ftrace_graph_return
	ldr     r2, [r1]
	cmp     r0, r2
	bne     ftrace_graph_caller\suffix

	ldr     r1, =ftrace_graph_entry
	ldr     r2, [r1]
	ldr     r0, =ftrace_graph_entry_stub
	cmp     r0, r2
	bne     ftrace_graph_caller\suffix
#endif

	mcount_exit

1: 	mcount_get_lr	r1			@ lr of instrumented func
	mcount_adjust_addr	r0, lr		@ instrumented function
	adr	lr, BSYM(2f)
	mov	pc, r2
2:	mcount_exit
.endm

.macro __ftrace_caller suffix
	mcount_enter

	mcount_get_lr	r1			@ lr of instrumented func
	mcount_adjust_addr	r0, lr		@ instrumented function

	.globl ftrace_call\suffix
ftrace_call\suffix:
	bl	ftrace_stub

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl ftrace_graph_call\suffix
ftrace_graph_call\suffix:
	mov	r0, r0
#endif

	mcount_exit
.endm

.macro __ftrace_graph_caller
	sub	r0, fp, #4		@ &lr of instrumented routine (&parent)
#ifdef CONFIG_DYNAMIC_FTRACE
	@ called from __ftrace_caller, saved in mcount_enter
	ldr	r1, [sp, #16]		@ instrumented routine (func)
	mcount_adjust_addr	r1, r1
#else
	@ called from __mcount, untouched in lr
	mcount_adjust_addr	r1, lr	@ instrumented routine (func)
#endif
	mov	r2, fp			@ frame pointer
	bl	prepare_ftrace_return
	mcount_exit
.endm

#ifdef CONFIG_OLD_MCOUNT
/*
 * mcount
 */

.macro mcount_enter
	stmdb	sp!, {r0-r3, lr}
.endm

.macro mcount_get_lr reg
	ldr	\reg, [fp, #-4]
.endm

.macro mcount_exit
	ldr	lr, [fp, #-4]
	ldmia	sp!, {r0-r3, pc}
.endm

ENTRY(mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
	stmdb	sp!, {lr}
	ldr	lr, [fp, #-4]
	ldmia	sp!, {pc}
#else
	__mcount _old
#endif
ENDPROC(mcount)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller_old)
	__ftrace_caller _old
ENDPROC(ftrace_caller_old)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller_old)
	__ftrace_graph_caller
ENDPROC(ftrace_graph_caller_old)
#endif

.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit
#endif

/*
 * __gnu_mcount_nc
 */

.macro mcount_enter
	stmdb	sp!, {r0-r3, lr}
.endm

.macro mcount_get_lr reg
	ldr	\reg, [sp, #20]
.endm

.macro mcount_exit
	ldmia	sp!, {r0-r3, ip, lr}
	mov	pc, ip
.endm

ENTRY(__gnu_mcount_nc)
#ifdef CONFIG_DYNAMIC_FTRACE
	mov	ip, lr
	ldmia	sp!, {lr}
	mov	pc, ip
#else
	__mcount
#endif
ENDPROC(__gnu_mcount_nc)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller)
	__ftrace_caller
ENDPROC(ftrace_caller)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	__ftrace_graph_caller
ENDPROC(ftrace_graph_caller)
#endif

.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl return_to_handler
return_to_handler:
	stmdb	sp!, {r0-r3}
	mov	r0, fp			@ frame pointer
	bl	ftrace_return_to_handler
	mov	lr, r0			@ r0 has real ret addr
	ldmia	sp!, {r0-r3}
	mov	pc, lr
#endif

ENTRY(ftrace_stub)
.Lftrace_stub:
	mov	pc, lr
ENDPROC(ftrace_stub)

#endif /* CONFIG_FUNCTION_TRACER */

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	.align	5
ENTRY(vector_swi)
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
	str	lr, [sp, #S_PC]			@ Save calling PC
	str	r8, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
	zero_fp

	/*
	 * Get the system call number.
	 */

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine if it is an EABI or an old ABI call.
	 */
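	/*
	 * Roughly, an EABI caller issues
	 *
	 *	mov	r7, #<syscall nr>
	 *	swi	#0
	 *
	 * whereas an old-ABI caller encodes the number in the instruction
	 * itself, swi #(0x900000 + nr), so the immediate fetched below is
	 * enough to tell the two apart.
	 */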
#ifdef CONFIG_ARM_THUMB
	tst	r8, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
	ldreq	r10, [lr, #-4]			@ get SWI instruction
#else
	ldr	r10, [lr, #-4]			@ get SWI instruction
#endif
#ifdef CONFIG_CPU_ENDIAN_BE8
	rev	r10, r10			@ little endian instruction
#endif

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always put syscall number into scno (r7).
	 */
#elif defined(CONFIG_ARM_THUMB)
	/* Legacy ABI only, possibly thumb mode. */
	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
	ldreq	scno, [lr, #-4]

#else
	/* Legacy ABI only. */
	ldr	scno, [lr, #-4]			@ get SWI instruction
#endif

#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	ip, __cr_alignment
	ldr	ip, [ip]
	mcr	p15, 0, ip, c1, c0		@ update control register
#endif
	enable_irq

	get_thread_info tsk
	adr	tbl, sys_call_table		@ load syscall table pointer

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
	bics	r10, r10, #0xff000000
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#endif

	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
	stmdb	sp!, {r4, r5}			@ push fifth and sixth args

#ifdef CONFIG_SECCOMP
	tst	r10, #_TIF_SECCOMP
	beq	1f
	mov	r0, scno
	bl	__secure_computing
	add	r0, sp, #S_R0 + S_OFF		@ pointer to regs
	ldmia	r0, {r0 - r3}			@ have to reload r0 - r3
1:
#endif

	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
	bne	__sys_trace

	cmp	scno, #NR_syscalls		@ check upper syscall limit
	adr	lr, BSYM(ret_fast_syscall)	@ return address
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine

	add	r1, sp, #S_OFF
2:	mov	why, #0				@ no longer a real syscall
	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	b	sys_ni_syscall			@ not private func
ENDPROC(vector_swi)

	/*
	 * This is the really slow path.  We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	r2, scno
	add	r1, sp, #S_OFF
	mov	r0, #0				@ trace entry [IP = 0]
	bl	syscall_trace

	adr	lr, BSYM(__sys_trace_return)	@ return address
	mov	scno, r0			@ syscall number (possibly new)
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldmccia	r1, {r0 - r3}			@ have to reload r0 - r3
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
	b	2b

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r2, scno
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall

	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
#endif
	.ltorg

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple syscalls are obsolete and defined as sys_ni_syscall.
 */
#define ABI(native, compat) native
#ifdef CONFIG_AEABI
#define OBSOLETE(syscall) sys_ni_syscall
#else
#define OBSOLETE(syscall) syscall
#endif

	.type	sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
sys_syscall:
		bic	scno, r0, #__NR_OABI_SYSCALL_BASE
		cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
		cmpne	scno, #NR_syscalls	@ check range
		stmloia	sp, {r5, r6}		@ shuffle args
		movlo	r0, r1
		movlo	r1, r2
		movlo	r2, r3
		movlo	r3, r4
		ldrlo	pc, [tbl, scno, lsl #2]
		b	sys_ni_syscall
ENDPROC(sys_syscall)

sys_fork_wrapper:
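		@ sys_fork() takes a struct pt_regs pointer, so pass the
		@ saved user registers (the vfork and execve wrappers below
		@ do the same)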
		add	r0, sp, #S_OFF
		b	sys_fork
ENDPROC(sys_fork_wrapper)

sys_vfork_wrapper:
		add	r0, sp, #S_OFF
		b	sys_vfork
ENDPROC(sys_vfork_wrapper)

sys_execve_wrapper:
		add	r3, sp, #S_OFF
		b	sys_execve
ENDPROC(sys_execve_wrapper)

sys_clone_wrapper:
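		@ sys_clone() takes its struct pt_regs pointer as a stacked
		@ argument, so drop it into the second stack slot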
		add	ip, sp, #S_OFF
		str	ip, [sp, #4]
		b	sys_clone
ENDPROC(sys_clone_wrapper)

sys_sigreturn_wrapper:
		add	r0, sp, #S_OFF
		mov	why, #0		@ prevent syscall restart handling
		b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
		add	r0, sp, #S_OFF
		mov	why, #0		@ prevent syscall restart handling
		b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

sys_sigaltstack_wrapper:
		ldr	r2, [sp, #S_OFF + S_SP]
		b	do_sigaltstack
ENDPROC(sys_sigaltstack_wrapper)

sys_statfs64_wrapper:
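		@ with EABI, struct statfs64 is padded out to 88 bytes while
		@ the syscall expects the 84-byte layout, so fix up the size
		@ argument (sys_fstatfs64_wrapper below does the same)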
		teq	r1, #88
		moveq	r1, #84
		b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
		teq	r1, #88
		moveq	r1, #84
		b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)

/*
 * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
 * offset, we return EINVAL.
 */
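/*
 * For example, with 16K pages (PAGE_SHIFT == 14) only offsets that are a
 * multiple of four 4K units can be honoured; the low bits are checked
 * below before off_4k is converted into units of PAGE_SIZE.
 */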
sys_mmap2:
#if PAGE_SHIFT > 12
		tst	r5, #PGOFF_MASK
		moveq	r5, r5, lsr #PAGE_SHIFT - 12
		streq	r5, [sp, #4]
		beq	sys_mmap_pgoff
		mov	r0, #-EINVAL
		mov	pc, lr
#else
		str	r5, [sp, #4]
		b	sys_mmap_pgoff
#endif
ENDPROC(sys_mmap2)

#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences
 */

sys_oabi_pread64:
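		@ the legacy ABI passes the 64-bit position in r3/r4, whereas
		@ EABI sys_pread64 expects it in the two stacked argument
		@ slots reserved by vector_swi, so copy it across first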
		stmia	sp, {r3, r4}
		b	sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
		stmia	sp, {r3, r4}
		b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
		mov	r3, r2
		mov	r2, r1
		b	sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
		mov	r3, r2
		mov	r2, r1
		b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
		str	r3, [sp]
		mov	r3, r2
		mov	r2, r1
		b	sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
#define ABI(native, compat) compat
#define OBSOLETE(syscall) syscall

	.type	sys_oabi_call_table, #object
ENTRY(sys_oabi_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

#endif