/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>

#ifdef CONFIG_NEED_RET_TO_USER
#include <mach/entry-macro.S>
#else
	@ Default no-op: platforms without CONFIG_NEED_RET_TO_USER need no
	@ machine-specific work on return to user space.
	.macro  arch_ret_to_user, tmp1, tmp2
	.endm
#endif

#include "entry-header.S"

	.align	5
/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving r0 back into the SVC
 * stack.
 */
ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	disable_irq				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK		@ anything pending for this task?
	bne	fast_work_pending		@ yes: take the slow path
	asm_trace_hardirqs_on

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr
	ct_user_enter

	restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend		)
/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
work_pending:
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	bl	do_work_pending
	cmp	r0, #0				@ non-zero means restart the syscall
	beq	no_work_pending
	movlt	scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
	ldmia	sp, {r0 - r6}			@ have to reload r0 - r6
	b	local_restart			@ ... and off we go

/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq				@ disable interrupts
ENTRY(ret_to_user_from_irq)
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK		@ anything pending for this task?
	bne	work_pending
no_work_pending:
	asm_trace_hardirqs_on

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr
	ct_user_enter save = 0

	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)
/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	cmp	r5, #0				@ r5 non-zero: kernel thread fn
	movne	r0, r4				@ r4 carries the fn argument
	adrne	lr, BSYM(1f)			@ return here after the fn
	movne	pc, r5				@ call the kernel thread fn
1:	get_thread_info tsk
	b	ret_slow_syscall
ENDPROC(ret_from_fork)
L
Linus Torvalds 已提交
95

96 97
	.equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
L
Linus Torvalds 已提交
98
#include "calls.S"
99 100 101 102 103 104 105 106 107

/*
 * Ensure that the system call table is equal to __NR_syscalls,
 * which is the value the rest of the system sees
 */
.ifne NR_syscalls - __NR_syscalls
.error "__NR_syscalls is not equal to the size of the syscall table"
.endif

108 109
#undef CALL
#define CALL(x) .long x
L
#ifdef CONFIG_FUNCTION_TRACER
/*
 * When compiling with -pg, gcc inserts a call to the mcount routine at the
 * start of every function.  In mcount, apart from the function's address (in
 * lr), we need to get hold of the function's caller's address.
 *
 * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
 *
 *	bl	mcount
 *
 * These versions have the limitation that in order for the mcount routine to
 * be able to determine the function's caller's address, an APCS-style frame
 * pointer (which is set up with something like the code below) is required.
 *
 *	mov     ip, sp
 *	push    {fp, ip, lr, pc}
 *	sub     fp, ip, #4
 *
 * With EABI, these frame pointers are not available unless -mapcs-frame is
 * specified, and if building as Thumb-2, not even then.
 *
 * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
 * with call sites like:
 *
 *	push	{lr}
 *	bl	__gnu_mcount_nc
 *
 * With these compilers, frame pointers are not necessary.
 *
 * mcount can be thought of as a function called in the middle of a subroutine
 * call.  As such, it needs to be transparent for both the caller and the
 * callee: the original lr needs to be restored when leaving mcount, and no
 * registers should be clobbered.  (In the __gnu_mcount_nc implementation, we
 * clobber the ip register.  This is OK because the ARM calling convention
 * allows it to be clobbered in subroutines and doesn't use it to hold
 * parameters.)
 *
 * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
 * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
 * arch/arm/kernel/ftrace.c).
 */

#ifndef CONFIG_OLD_MCOUNT
#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
#endif
#endif

@ \rd = \rn adjusted back to the start of the instrumented function's
@ mcount call site (strip Thumb bit, back up over the mcount insn).
.macro mcount_adjust_addr rd, rn
	bic	\rd, \rn, #1		@ clear the Thumb bit if present
	sub	\rd, \rd, #MCOUNT_INSN_SIZE
.endm

@ Common mcount body: dispatch to the registered trace function (if any)
@ and to the graph tracer entry points.  \suffix selects the _old variants.
.macro __mcount suffix
	mcount_enter
	ldr	r0, =ftrace_trace_function
	ldr	r2, [r0]
	adr	r0, .Lftrace_stub
	cmp	r0, r2			@ tracer installed (!= ftrace_stub)?
	bne	1f

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	ldr     r1, =ftrace_graph_return
	ldr     r2, [r1]
	cmp     r0, r2
	bne     ftrace_graph_caller\suffix

	ldr     r1, =ftrace_graph_entry
	ldr     r2, [r1]
	ldr     r0, =ftrace_graph_entry_stub
	cmp     r0, r2
	bne     ftrace_graph_caller\suffix
#endif

	mcount_exit

1: 	mcount_get_lr	r1			@ lr of instrumented func
	mcount_adjust_addr	r0, lr		@ instrumented function
	adr	lr, BSYM(2f)
	mov	pc, r2				@ call the tracer
2:	mcount_exit
.endm
@ Dynamic-ftrace caller body: the bl below is live-patched by
@ arch/arm/kernel/ftrace.c to call the active tracer.
.macro __ftrace_caller suffix
	mcount_enter

	mcount_get_lr	r1			@ lr of instrumented func
	mcount_adjust_addr	r0, lr		@ instrumented function

	.globl ftrace_call\suffix
ftrace_call\suffix:
	bl	ftrace_stub			@ patched at runtime

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl ftrace_graph_call\suffix
ftrace_graph_call\suffix:
	mov	r0, r0				@ nop slot, patched for graph tracing
#endif

	mcount_exit
.endm
@ Set up arguments and call prepare_ftrace_return for the graph tracer.
.macro __ftrace_graph_caller
	sub	r0, fp, #4		@ &lr of instrumented routine (&parent)
#ifdef CONFIG_DYNAMIC_FTRACE
	@ called from __ftrace_caller, saved in mcount_enter
	ldr	r1, [sp, #16]		@ instrumented routine (func)
	mcount_adjust_addr	r1, r1
#else
	@ called from __mcount, untouched in lr
	mcount_adjust_addr	r1, lr	@ instrumented routine (func)
#endif
	mov	r2, fp			@ frame pointer
	bl	prepare_ftrace_return
	mcount_exit
.endm
#ifdef CONFIG_OLD_MCOUNT
/*
 * mcount
 *
 * Entry/exit helpers for the old (pre-GCC-4.4) mcount ABI, which relies
 * on an APCS frame pointer: the caller's lr is found at [fp, #-4].
 */

.macro mcount_enter
	stmdb	sp!, {r0-r3, lr}
.endm

.macro mcount_get_lr reg
	ldr	\reg, [fp, #-4]
.endm

.macro mcount_exit
	ldr	lr, [fp, #-4]
	ldmia	sp!, {r0-r3, pc}
.endm

ENTRY(mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
	@ With dynamic ftrace the call sites are patched out; this default
	@ entry just returns transparently to the instrumented function.
	stmdb	sp!, {lr}
	ldr	lr, [fp, #-4]
	ldmia	sp!, {pc}
#else
	__mcount _old
#endif
ENDPROC(mcount)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller_old)
	__ftrace_caller _old
ENDPROC(ftrace_caller_old)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller_old)
	__ftrace_graph_caller
ENDPROC(ftrace_graph_caller_old)
#endif

@ Discard the old-ABI helpers so the __gnu_mcount_nc variants below can
@ redefine them.
.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit
#endif
/*
 * __gnu_mcount_nc
 *
 * Entry/exit helpers for the EABI mcount ABI: the call site has already
 * pushed lr, so the caller's lr lives on the stack, not at [fp, #-4].
 */

.macro mcount_enter
/*
 * This pad compensates for the push {lr} at the call site.  Note that we are
 * unable to unwind through a function which does not otherwise save its lr.
 */
 UNWIND(.pad	#4)
	stmdb	sp!, {r0-r3, lr}
 UNWIND(.save	{r0-r3, lr})
.endm

.macro mcount_get_lr reg
	ldr	\reg, [sp, #20]		@ lr pushed by the call site
.endm

.macro mcount_exit
	ldmia	sp!, {r0-r3, ip, lr}	@ ip slot absorbs the call-site push
	mov	pc, ip
.endm

ENTRY(__gnu_mcount_nc)
UNWIND(.fnstart)
#ifdef CONFIG_DYNAMIC_FTRACE
	@ Default entry when call sites are patched out: pop the pushed lr
	@ and return transparently.
	mov	ip, lr
	ldmia	sp!, {lr}
	mov	pc, ip
#else
	__mcount
#endif
UNWIND(.fnend)
ENDPROC(__gnu_mcount_nc)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller)
UNWIND(.fnstart)
	__ftrace_caller
UNWIND(.fnend)
ENDPROC(ftrace_caller)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
UNWIND(.fnstart)
	__ftrace_graph_caller
UNWIND(.fnend)
ENDPROC(ftrace_graph_caller)
#endif

@ Done with the mcount helper macros.
.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	@ Trampoline the graph tracer substitutes for the instrumented
	@ function's return address; ftrace_return_to_handler yields the
	@ real return address.
	.globl return_to_handler
return_to_handler:
	stmdb	sp!, {r0-r3}
	mov	r0, fp			@ frame pointer
	bl	ftrace_return_to_handler
	mov	lr, r0			@ r0 has real ret addr
	ldmia	sp!, {r0-r3}
	mov	pc, lr
#endif

ENTRY(ftrace_stub)
.Lftrace_stub:
	mov	pc, lr
ENDPROC(ftrace_stub)

#endif /* CONFIG_FUNCTION_TRACER */

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	.align	5
ENTRY(vector_swi)
#ifdef CONFIG_CPU_V7M
	v7m_exception_entry
#else
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
	str	lr, [sp, #S_PC]			@ Save calling PC
	str	r8, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
#endif
	zero_fp

#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	ip, __cr_alignment
	ldr	ip, [ip]
	mcr	p15, 0, ip, c1, c0		@ update control register
#endif

	enable_irq
	ct_user_exit
	get_thread_info tsk

	/*
	 * Get the system call number.
	 */

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine if it is an EABI or an old ABI call.
	 */
#ifdef CONFIG_ARM_THUMB
	tst	r8, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
 USER(	ldreq	r10, [lr, #-4]		)	@ get SWI instruction
#else
 USER(	ldr	r10, [lr, #-4]		)	@ get SWI instruction
#endif
#ifdef CONFIG_CPU_ENDIAN_BE8
	rev	r10, r10			@ little endian instruction
#endif

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always put syscall number into scno (r7).
	 */
#elif defined(CONFIG_ARM_THUMB)
	/* Legacy ABI only, possibly thumb mode. */
	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
 USER(	ldreq	scno, [lr, #-4]		)

#else
	/* Legacy ABI only. */
 USER(	ldr	scno, [lr, #-4]		)	@ get SWI instruction
#endif

	adr	tbl, sys_call_table		@ load syscall table pointer

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
	bics	r10, r10, #0xff000000
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#endif

local_restart:
	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
	stmdb	sp!, {r4, r5}			@ push fifth and sixth args

	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
	bne	__sys_trace

	cmp	scno, #NR_syscalls		@ check upper syscall limit
	adr	lr, BSYM(ret_fast_syscall)	@ return address
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine

	add	r1, sp, #S_OFF
2:	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	mov	why, #0				@ no longer a real syscall
	b	sys_ni_syscall			@ not private func

#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
	/*
	 * We failed to handle a fault trying to access the page
	 * containing the swi instruction, but we're not really in a
	 * position to return -EFAULT. Instead, return back to the
	 * instruction and re-enter the user fault handling path trying
	 * to page it in. This will likely result in sending SEGV to the
	 * current task.
	 */
9001:
	sub	lr, lr, #4
	str	lr, [sp, #S_PC]
	b	ret_fast_syscall
#endif
ENDPROC(vector_swi)
L
Linus Torvalds 已提交
466 467 468 469 470 471

	/*
	 * This is the really slow path.  We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 */
__sys_trace:
472 473 474
	mov	r1, scno
	add	r0, sp, #S_OFF
	bl	syscall_trace_enter
L
Linus Torvalds 已提交
475

476
	adr	lr, BSYM(__sys_trace_return)	@ return address
477
	mov	scno, r0			@ syscall number (possibly new)
L
Linus Torvalds 已提交
478 479
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
480 481
	ldmccia	r1, {r0 - r6}			@ have to reload r0 - r6
	stmccia	sp, {r4, r5}			@ and update the stack args
L
Linus Torvalds 已提交
482
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
483 484 485 486
	cmp	scno, #-1			@ skip the syscall?
	bne	2b
	add	sp, sp, #S_OFF			@ restore stack
	b	ret_slow_syscall
L
Linus Torvalds 已提交
487 488 489

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
490 491
	mov	r0, sp
	bl	syscall_trace_exit
L
Linus Torvalds 已提交
492 493 494 495 496 497 498
	b	ret_slow_syscall

	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
	@ Literal holding the address of cr_alignment, read by vector_swi.
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
#endif
	.ltorg

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple syscalls are obsolete and defined as sys_ni_syscall.
 */
#define ABI(native, compat) native
#ifdef CONFIG_AEABI
#define OBSOLETE(syscall) sys_ni_syscall
#else
#define OBSOLETE(syscall) syscall
#endif

	.type	sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
sys_syscall:
		bic	scno, r0, #__NR_OABI_SYSCALL_BASE
		cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
		cmpne	scno, #NR_syscalls	@ check range
		stmloia	sp, {r5, r6}		@ shuffle args
		movlo	r0, r1
		movlo	r1, r2
		movlo	r2, r3
		movlo	r3, r4
		ldrlo	pc, [tbl, scno, lsl #2]
		b	sys_ni_syscall
ENDPROC(sys_syscall)

@ Pass the saved register frame (sp + S_OFF) to the sigreturn handlers and
@ clear "why" so the return value is not treated as a restartable syscall.
sys_sigreturn_wrapper:
		add	r0, sp, #S_OFF
		mov	why, #0		@ prevent syscall restart handling
		b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
		add	r0, sp, #S_OFF
		mov	why, #0		@ prevent syscall restart handling
		b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

@ If the size argument (r1) is 88, rewrite it to 84 before calling the
@ real handler.
sys_statfs64_wrapper:
		teq	r1, #88
		moveq	r1, #84
		b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
		teq	r1, #88
		moveq	r1, #84
		b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)

/*
 * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
 * offset, we return EINVAL.
 */
sys_mmap2:
#if PAGE_SHIFT > 12
		tst	r5, #PGOFF_MASK			@ offset representable in pages?
		moveq	r5, r5, lsr #PAGE_SHIFT - 12	@ convert 4K units to pages
		streq	r5, [sp, #4]
		beq	sys_mmap_pgoff
		mov	r0, #-EINVAL
		mov	pc, lr
#else
		@ PAGE_SHIFT == 12: 4K units are already page units.
		str	r5, [sp, #4]
		b	sys_mmap_pgoff
#endif
ENDPROC(sys_mmap2)

#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences
 */

sys_oabi_pread64:
		stmia	sp, {r3, r4}		@ 64-bit offset goes on the stack
		b	sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
		stmia	sp, {r3, r4}		@ 64-bit offset goes on the stack
		b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
		mov	r3, r2			@ shift 64-bit length up one reg pair
		mov	r2, r1
		b	sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
		mov	r3, r2			@ shift 64-bit length up one reg pair
		mov	r2, r1
		b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
		str	r3, [sp]		@ count moves to the stack
		mov	r3, r2			@ shift 64-bit offset up one reg pair
		mov	r2, r1
		b	sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
#define ABI(native, compat) compat
#define OBSOLETE(syscall) syscall

	.type	sys_oabi_call_table, #object
ENTRY(sys_oabi_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

#endif