/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Compatibility mode system call entry point for x86-64.
 *
 * Copyright 2000-2002 Andi Kleen, SuSE Labs.
 */
#include "calling.h"
#include <asm/asm-offsets.h>
#include <asm/current.h>
#include <asm/errno.h>
#include <asm/ia32_unistd.h>
#include <asm/thread_info.h>
#include <asm/segment.h>
#include <asm/irqflags.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <linux/linkage.h>
#include <linux/err.h>

	.section .entry.text, "ax"

/*
 * 32-bit SYSENTER entry.
 *
 * 32-bit system calls through the vDSO's __kernel_vsyscall enter here
 * on 64-bit kernels running on Intel CPUs.
 *
 * The SYSENTER instruction, in principle, should *only* occur in the
 * vDSO.  In practice, a small number of Android devices were shipped
 * with a copy of Bionic that inlined a SYSENTER instruction.  This
 * never happened in any of Google's Bionic versions -- it only happened
 * in a narrow range of Intel-provided versions.
 *
 * SYSENTER loads SS, RSP, CS, and RIP from previously programmed MSRs.
 * IF and VM in RFLAGS are cleared (IOW: interrupts are off).
 * SYSENTER does not save anything on the stack,
 * and does not save old RIP (!!!), RSP, or RFLAGS.
 *
 * Arguments:
 * eax  system call number
 * ebx  arg1
 * ecx  arg2
 * edx  arg3
 * esi  arg4
 * edi  arg5
 * ebp  user stack
 * 0(%ebp) arg6
 */
ENTRY(entry_SYSENTER_compat)
	/* Interrupts are off on entry. */
	SWAPGS

	/* We are about to clobber %rsp anyway, clobbering here is OK */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp

	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	/*
	 * User tracing code (ptrace or signal handlers) might assume that
	 * the saved RAX contains a 32-bit number when we're invoking a 32-bit
	 * syscall.  Just in case the high bits are nonzero, zero-extend
	 * the syscall number.  (This could almost certainly be deleted
	 * with no ill effects.)
	 */
	movl	%eax, %eax

	/* Construct struct pt_regs on stack */
	pushq	$__USER32_DS		/* pt_regs->ss */
	pushq	%rbp			/* pt_regs->sp (stashed in bp) */

	/*
	 * Push flags.  This is nasty.  First, interrupts are currently
	 * off, but we need pt_regs->flags to have IF set.  Second, even
	 * if TF was set when SYSENTER started, it's clear by now.  We fix
	 * that later using TIF_SINGLESTEP.
	 */
	pushfq				/* pt_regs->flags (except IF = 0) */
	orl	$X86_EFLAGS_IF, (%rsp)	/* Fix saved flags */
	pushq	$__USER32_CS		/* pt_regs->cs */
	pushq	$0			/* pt_regs->ip = 0 (placeholder) */
	pushq	%rax			/* pt_regs->orig_ax */
	pushq	%rdi			/* pt_regs->di */
	pushq	%rsi			/* pt_regs->si */
	pushq	%rdx			/* pt_regs->dx */
	pushq	%rcx			/* pt_regs->cx */
	pushq	$-ENOSYS		/* pt_regs->ax */
	/*
	 * Fill in the rest of pt_regs with zeros and clear the live
	 * registers as well ("nospec") so no user-controlled values
	 * remain in them.
	 */
	pushq   $0			/* pt_regs->r8  = 0 */
	xorl	%r8d, %r8d		/* nospec   r8 */
	pushq   $0			/* pt_regs->r9  = 0 */
	xorl	%r9d, %r9d		/* nospec   r9 */
	pushq   $0			/* pt_regs->r10 = 0 */
	xorl	%r10d, %r10d		/* nospec   r10 */
	pushq   $0			/* pt_regs->r11 = 0 */
	xorl	%r11d, %r11d		/* nospec   r11 */
	pushq   %rbx                    /* pt_regs->rbx */
	xorl	%ebx, %ebx		/* nospec   rbx */
	pushq   %rbp                    /* pt_regs->rbp (will be overwritten) */
	xorl	%ebp, %ebp		/* nospec   rbp */
	pushq   $0			/* pt_regs->r12 = 0 */
	xorl	%r12d, %r12d		/* nospec   r12 */
	pushq   $0			/* pt_regs->r13 = 0 */
	xorl	%r13d, %r13d		/* nospec   r13 */
	pushq   $0			/* pt_regs->r14 = 0 */
	xorl	%r14d, %r14d		/* nospec   r14 */
	pushq   $0			/* pt_regs->r15 = 0 */
	xorl	%r15d, %r15d		/* nospec   r15 */
	cld

	/*
	 * SYSENTER doesn't filter flags, so we need to clear NT and AC
	 * ourselves.  To save a few cycles, we can check whether
	 * either was set instead of doing an unconditional popfq.
	 * This needs to happen before enabling interrupts so that
	 * we don't get preempted with NT set.
	 *
	 * If TF is set, we will single-step all the way to here -- do_debug
	 * will ignore all the traps.  (Yes, this is slow, but so is
	 * single-stepping in general.  This allows us to avoid having
	 * a more complicated code to handle the case where a user program
	 * forces us to single-step through the SYSENTER entry code.)
	 *
	 * NB.: .Lsysenter_fix_flags is a label with the code under it moved
	 * out-of-line as an optimization: NT is unlikely to be set in the
	 * majority of the cases and instead of polluting the I$ unnecessarily,
	 * we're keeping that code behind a branch which will predict as
	 * not-taken and therefore its instructions won't be fetched.
	 */
	testl	$X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, EFLAGS(%rsp)
	jnz	.Lsysenter_fix_flags
.Lsysenter_flags_fixed:

	/*
	 * User mode is traced as though IRQs are on, and SYSENTER
	 * turned them off.
	 */
	TRACE_IRQS_OFF

	movq	%rsp, %rdi
	call	do_fast_syscall_32
	/* XEN PV guests always use IRET path */
	ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
		    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV
	jmp	sysret32_from_system_call

.Lsysenter_fix_flags:
	pushq	$X86_EFLAGS_FIXED
	popfq
	jmp	.Lsysenter_flags_fixed
GLOBAL(__end_entry_SYSENTER_compat)
ENDPROC(entry_SYSENTER_compat)
/*
 * 32-bit SYSCALL entry.
 *
 * 32-bit system calls through the vDSO's __kernel_vsyscall enter here
 * on 64-bit kernels running on AMD CPUs.
 *
 * The SYSCALL instruction, in principle, should *only* occur in the
 * vDSO.  In practice, it appears that this really is the case.
 * As evidence:
 *
 *  - The calling convention for SYSCALL has changed several times without
 *    anyone noticing.
 *
 *  - Prior to the in-kernel X86_BUG_SYSRET_SS_ATTRS fixup, any
 *    user task that did SYSCALL without immediately reloading SS
 *    would randomly crash.
 *
 *  - Most programmers do not directly target AMD CPUs, and the 32-bit
 *    SYSCALL instruction does not exist on Intel CPUs.  Even on AMD
 *    CPUs, Linux disables the SYSCALL instruction on 32-bit kernels
 *    because the SYSCALL instruction in legacy/native 32-bit mode (as
 *    opposed to compat mode) is sufficiently poorly designed as to be
 *    essentially unusable.
 *
 * 32-bit SYSCALL saves RIP to RCX, clears RFLAGS.RF, then saves
 * RFLAGS to R11, then loads new SS, CS, and RIP from previously
 * programmed MSRs.  RFLAGS gets masked by a value from another MSR
 * (so CLD and CLAC are not needed).  SYSCALL does not save anything on
 * the stack and does not change RSP.
 *
 * Note: RFLAGS saving+masking-with-MSR happens only in Long mode
 * (in legacy 32-bit mode, IF, RF and VM bits are cleared and that's it).
 * Don't get confused: RFLAGS saving+masking depends on Long Mode Active bit
 * (EFER.LMA=1), NOT on bitness of userspace where SYSCALL executes
 * or target CS descriptor's L bit (SYSCALL does not read segment descriptors).
 *
 * Arguments:
 * eax  system call number
 * ecx  return address
 * ebx  arg1
 * ebp  arg2	(note: not saved in the stack frame, should not be touched)
 * edx  arg3
 * esi  arg4
 * edi  arg5
 * esp  user stack
 * 0(%esp) arg6
 */
ENTRY(entry_SYSCALL_compat)
	/* Interrupts are off on entry. */
	swapgs

	/* Stash user ESP */
	movl	%esp, %r8d

	/* Use %rsp as scratch reg. User ESP is stashed in r8 */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp

	/* Switch to the kernel stack */
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	/* Construct struct pt_regs on stack */
	pushq	$__USER32_DS		/* pt_regs->ss */
	pushq	%r8			/* pt_regs->sp */
	pushq	%r11			/* pt_regs->flags */
	pushq	$__USER32_CS		/* pt_regs->cs */
	pushq	%rcx			/* pt_regs->ip */
GLOBAL(entry_SYSCALL_compat_after_hwframe)
	movl	%eax, %eax		/* discard orig_ax high bits */
	pushq	%rax			/* pt_regs->orig_ax */
	pushq	%rdi			/* pt_regs->di */
	pushq	%rsi			/* pt_regs->si */
	pushq	%rdx			/* pt_regs->dx */
	pushq	%rbp			/* pt_regs->cx (stashed in bp) */
	pushq	$-ENOSYS		/* pt_regs->ax */
	pushq   $0			/* pt_regs->r8  = 0 */
	xorl	%r8d, %r8d		/* nospec   r8 */
	pushq   $0			/* pt_regs->r9  = 0 */
	xorl	%r9d, %r9d		/* nospec   r9 */
	pushq   $0			/* pt_regs->r10 = 0 */
	xorl	%r10d, %r10d		/* nospec   r10 */
	pushq   $0			/* pt_regs->r11 = 0 */
	xorl	%r11d, %r11d		/* nospec   r11 */
	pushq   %rbx                    /* pt_regs->rbx */
	xorl	%ebx, %ebx		/* nospec   rbx */
	pushq   %rbp                    /* pt_regs->rbp (will be overwritten) */
	xorl	%ebp, %ebp		/* nospec   rbp */
	pushq   $0			/* pt_regs->r12 = 0 */
	xorl	%r12d, %r12d		/* nospec   r12 */
	pushq   $0			/* pt_regs->r13 = 0 */
	xorl	%r13d, %r13d		/* nospec   r13 */
	pushq   $0			/* pt_regs->r14 = 0 */
	xorl	%r14d, %r14d		/* nospec   r14 */
	pushq   $0			/* pt_regs->r15 = 0 */
	xorl	%r15d, %r15d		/* nospec   r15 */

	/*
	 * User mode is traced as though IRQs are on, and SYSCALL
	 * turned them off.
	 */
	TRACE_IRQS_OFF

	movq	%rsp, %rdi
	call	do_fast_syscall_32
	/* XEN PV guests always use IRET path */
	ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
		    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV

	/* Opportunistic SYSRET */
sysret32_from_system_call:
	TRACE_IRQS_ON			/* User mode traces as IRQs on. */
	movq	RBX(%rsp), %rbx		/* pt_regs->rbx */
	movq	RBP(%rsp), %rbp		/* pt_regs->rbp */
	movq	EFLAGS(%rsp), %r11	/* pt_regs->flags (in r11) */
	movq	RIP(%rsp), %rcx		/* pt_regs->ip (in rcx) */
	addq	$RAX, %rsp		/* Skip r8-r15 */
	popq	%rax			/* pt_regs->rax */
	popq	%rdx			/* Skip pt_regs->cx */
	popq	%rdx			/* pt_regs->dx */
	popq	%rsi			/* pt_regs->si */
	popq	%rdi			/* pt_regs->di */

        /*
         * USERGS_SYSRET32 does:
         *  GSBASE = user's GS base
         *  EIP = ECX
         *  RFLAGS = R11
         *  CS = __USER32_CS
         *  SS = __USER_DS
         *
	 * ECX will not match pt_regs->cx, but we're returning to a vDSO
	 * trampoline that will fix up RCX, so this is okay.
	 *
	 * R12-R15 are callee-saved, so they contain whatever was in them
	 * when the system call started, which is already known to user
	 * code.  We zero R8-R10 to avoid info leaks.
         */
	movq	RSP-ORIG_RAX(%rsp), %rsp

	/*
	 * The original userspace %rsp (RSP-ORIG_RAX(%rsp)) is stored
	 * on the process stack which is not mapped to userspace and
	 * not readable after we SWITCH_TO_USER_CR3.  Delay the CR3
	 * switch until after the last reference to the process
	 * stack.
	 *
	 * %r8/%r9 are zeroed before the sysret, thus safe to clobber.
	 */
	SWITCH_TO_USER_CR3_NOSTACK scratch_reg=%r8 scratch_reg2=%r9

	xorl	%r8d, %r8d
	xorl	%r9d, %r9d
	xorl	%r10d, %r10d

	swapgs
	sysretl
END(entry_SYSCALL_compat)

/*
 * 32-bit legacy system call entry.
 *
 * 32-bit x86 Linux system calls traditionally used the INT $0x80
 * instruction.  INT $0x80 lands here.
 *
 * This entry point can be used by 32-bit and 64-bit programs to perform
 * 32-bit system calls.  Instances of INT $0x80 can be found inline in
 * various programs and libraries.  It is also used by the vDSO's
 * __kernel_vsyscall fallback for hardware that doesn't support a faster
 * entry method.  Restarted 32-bit system calls also fall back to INT
 * $0x80 regardless of what instruction was originally used to do the
 * system call.
 *
 * This is considered a slow path.  It is not used by most libc
 * implementations on modern hardware except during process startup.
 *
 * Arguments:
 * eax  system call number
 * ebx  arg1
 * ecx  arg2
 * edx  arg3
 * esi  arg4
 * edi  arg5
 * ebp  arg6
 */
ENTRY(entry_INT80_compat)
	/*
	 * Interrupts are off on entry.
	 */
	ASM_CLAC			/* Do this early to minimize exposure */
	SWAPGS

	/*
	 * User tracing code (ptrace or signal handlers) might assume that
	 * the saved RAX contains a 32-bit number when we're invoking a 32-bit
	 * syscall.  Just in case the high bits are nonzero, zero-extend
	 * the syscall number.  (This could almost certainly be deleted
	 * with no ill effects.)
	 */
	movl	%eax, %eax

	/* switch to thread stack expects orig_ax and rdi to be pushed */
	pushq	%rax			/* pt_regs->orig_ax */
	pushq	%rdi			/* pt_regs->di */

	/* Need to switch before accessing the thread stack. */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	/* Copy the hardware frame plus orig_ax/di over from the entry stack (%rdi). */
	pushq	6*8(%rdi)		/* regs->ss */
	pushq	5*8(%rdi)		/* regs->rsp */
	pushq	4*8(%rdi)		/* regs->eflags */
	pushq	3*8(%rdi)		/* regs->cs */
	pushq	2*8(%rdi)		/* regs->ip */
	pushq	1*8(%rdi)		/* regs->orig_ax */

	pushq	(%rdi)			/* pt_regs->di */
	pushq	%rsi			/* pt_regs->si */
	pushq	%rdx			/* pt_regs->dx */
	pushq	%rcx			/* pt_regs->cx */
	pushq	$-ENOSYS		/* pt_regs->ax */
	pushq   $0			/* pt_regs->r8  = 0 */
	xorl	%r8d, %r8d		/* nospec   r8 */
	pushq   $0			/* pt_regs->r9  = 0 */
	xorl	%r9d, %r9d		/* nospec   r9 */
	pushq   $0			/* pt_regs->r10 = 0 */
	xorl	%r10d, %r10d		/* nospec   r10 */
	pushq   $0			/* pt_regs->r11 = 0 */
	xorl	%r11d, %r11d		/* nospec   r11 */
	pushq   %rbx                    /* pt_regs->rbx */
	xorl	%ebx, %ebx		/* nospec   rbx */
	pushq   %rbp                    /* pt_regs->rbp */
	xorl	%ebp, %ebp		/* nospec   rbp */
	pushq   %r12                    /* pt_regs->r12 */
	xorl	%r12d, %r12d		/* nospec   r12 */
	pushq   %r13                    /* pt_regs->r13 */
	xorl	%r13d, %r13d		/* nospec   r13 */
	pushq   %r14                    /* pt_regs->r14 */
	xorl	%r14d, %r14d		/* nospec   r14 */
	pushq   %r15                    /* pt_regs->r15 */
	xorl	%r15d, %r15d		/* nospec   r15 */
	cld

	/*
	 * User mode is traced as though IRQs are on, and the interrupt
	 * gate turned them off.
	 */
	TRACE_IRQS_OFF

	movq	%rsp, %rdi
	call	do_int80_syscall_32
.Lsyscall_32_done:

	/* Go back to user mode. */
	TRACE_IRQS_ON
	jmp	swapgs_restore_regs_and_return_to_usermode
END(entry_INT80_compat)