/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Compatibility mode system call entry point for x86-64.
 *
 * Copyright 2000-2002 Andi Kleen, SuSE Labs.
 */
#include "calling.h"
#include <asm/asm-offsets.h>
#include <asm/current.h>
#include <asm/errno.h>
#include <asm/ia32_unistd.h>
#include <asm/thread_info.h>
#include <asm/segment.h>
#include <asm/irqflags.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <linux/linkage.h>
#include <linux/err.h>

	.section .entry.text, "ax"

/*
 * 32-bit SYSENTER entry.
 *
 * 32-bit system calls through the vDSO's __kernel_vsyscall enter here
 * on 64-bit kernels running on Intel CPUs.
 *
 * The SYSENTER instruction, in principle, should *only* occur in the
 * vDSO.  In practice, a small number of Android devices were shipped
 * with a copy of Bionic that inlined a SYSENTER instruction.  This
 * never happened in any of Google's Bionic versions -- it only happened
 * in a narrow range of Intel-provided versions.
 *
 * SYSENTER loads SS, RSP, CS, and RIP from previously programmed MSRs.
 * IF and VM in RFLAGS are cleared (IOW: interrupts are off).
 * SYSENTER does not save anything on the stack,
 * and does not save old RIP (!!!), RSP, or RFLAGS.
 *
 * Arguments:
 * eax  system call number
 * ebx  arg1
 * ecx  arg2
 * edx  arg3
 * esi  arg4
 * edi  arg5
 * ebp  user stack
 * 0(%ebp) arg6
 */
ENTRY(entry_SYSENTER_compat)
	/* Interrupts are off on entry. */
	SWAPGS

	/* We are about to clobber %rsp anyway, clobbering here is OK */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp

	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	/*
	 * User tracing code (ptrace or signal handlers) might assume that
	 * the saved RAX contains a 32-bit number when we're invoking a 32-bit
	 * syscall.  Just in case the high bits are nonzero, zero-extend
	 * the syscall number.  (This could almost certainly be deleted
	 * with no ill effects.)
	 */
	movl	%eax, %eax

	/* Construct struct pt_regs on stack */
	pushq	$__USER32_DS		/* pt_regs->ss */
	pushq	%rbp			/* pt_regs->sp (stashed in bp) */

	/*
	 * Push flags.  This is nasty.  First, interrupts are currently
	 * off, but we need pt_regs->flags to have IF set.  Second, even
	 * if TF was set when SYSENTER started, it's clear by now.  We fix
	 * that later using TIF_SINGLESTEP.
	 */
	pushfq				/* pt_regs->flags (except IF = 0) */
	orl	$X86_EFLAGS_IF, (%rsp)	/* Fix saved flags */
	pushq	$__USER32_CS		/* pt_regs->cs */
	pushq	$0			/* pt_regs->ip = 0 (placeholder) */
	pushq	%rax			/* pt_regs->orig_ax */
	pushq	%rdi			/* pt_regs->di */
	pushq	%rsi			/* pt_regs->si */
	pushq	%rdx			/* pt_regs->dx */
	pushq	%rcx			/* pt_regs->cx */
	pushq	$-ENOSYS		/* pt_regs->ax */
	pushq   $0			/* pt_regs->r8  = 0 */
	xorl	%r8d, %r8d		/* nospec   r8 */
	pushq   $0			/* pt_regs->r9  = 0 */
	xorl	%r9d, %r9d		/* nospec   r9 */
	pushq   $0			/* pt_regs->r10 = 0 */
	xorl	%r10d, %r10d		/* nospec   r10 */
	pushq   $0			/* pt_regs->r11 = 0 */
	xorl	%r11d, %r11d		/* nospec   r11 */
	pushq   %rbx                    /* pt_regs->rbx */
	xorl	%ebx, %ebx		/* nospec   rbx */
	pushq   %rbp                    /* pt_regs->rbp (will be overwritten) */
	xorl	%ebp, %ebp		/* nospec   rbp */
	pushq   $0			/* pt_regs->r12 = 0 */
	xorl	%r12d, %r12d		/* nospec   r12 */
	pushq   $0			/* pt_regs->r13 = 0 */
	xorl	%r13d, %r13d		/* nospec   r13 */
	pushq   $0			/* pt_regs->r14 = 0 */
	xorl	%r14d, %r14d		/* nospec   r14 */
	pushq   $0			/* pt_regs->r15 = 0 */
	xorl	%r15d, %r15d		/* nospec   r15 */
	cld

	/*
	 * SYSENTER doesn't filter flags, so we need to clear NT and AC
	 * ourselves.  To save a few cycles, we can check whether
	 * either was set instead of doing an unconditional popfq.
	 * This needs to happen before enabling interrupts so that
	 * we don't get preempted with NT set.
	 *
	 * If TF is set, we will single-step all the way to here -- do_debug
	 * will ignore all the traps.  (Yes, this is slow, but so is
	 * single-stepping in general.  This allows us to avoid having
	 * a more complicated code to handle the case where a user program
	 * forces us to single-step through the SYSENTER entry code.)
	 *
	 * NB.: .Lsysenter_fix_flags is a label with the code under it moved
	 * out-of-line as an optimization: NT is unlikely to be set in the
	 * majority of the cases and instead of polluting the I$ unnecessarily,
	 * we're keeping that code behind a branch which will predict as
	 * not-taken and therefore its instructions won't be fetched.
	 */
	testl	$X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, EFLAGS(%rsp)
	jnz	.Lsysenter_fix_flags
.Lsysenter_flags_fixed:

	/*
	 * User mode is traced as though IRQs are on, and SYSENTER
	 * turned them off.
	 */
	TRACE_IRQS_OFF

	movq	%rsp, %rdi
	call	do_fast_syscall_32
	/* XEN PV guests always use IRET path */
	ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
		    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV
	jmp	sysret32_from_system_call

.Lsysenter_fix_flags:
	pushq	$X86_EFLAGS_FIXED
	popfq
	jmp	.Lsysenter_flags_fixed
GLOBAL(__end_entry_SYSENTER_compat)
ENDPROC(entry_SYSENTER_compat)
/*
 * 32-bit SYSCALL entry.
 *
 * 32-bit system calls through the vDSO's __kernel_vsyscall enter here
 * on 64-bit kernels running on AMD CPUs.
 *
 * The SYSCALL instruction, in principle, should *only* occur in the
 * vDSO.  In practice, it appears that this really is the case.
 * As evidence:
 *
 *  - The calling convention for SYSCALL has changed several times without
 *    anyone noticing.
 *
 *  - Prior to the in-kernel X86_BUG_SYSRET_SS_ATTRS fixup, any
 *    user task that did SYSCALL without immediately reloading SS
 *    would randomly crash.
 *
 *  - Most programmers do not directly target AMD CPUs, and the 32-bit
 *    SYSCALL instruction does not exist on Intel CPUs.  Even on AMD
 *    CPUs, Linux disables the SYSCALL instruction on 32-bit kernels
 *    because the SYSCALL instruction in legacy/native 32-bit mode (as
 *    opposed to compat mode) is sufficiently poorly designed as to be
 *    essentially unusable.
 *
 * 32-bit SYSCALL saves RIP to RCX, clears RFLAGS.RF, then saves
 * RFLAGS to R11, then loads new SS, CS, and RIP from previously
 * programmed MSRs.  RFLAGS gets masked by a value from another MSR
 * (so CLD and CLAC are not needed).  SYSCALL does not save anything on
 * the stack and does not change RSP.
 *
 * Note: RFLAGS saving+masking-with-MSR happens only in Long mode
 * (in legacy 32-bit mode, IF, RF and VM bits are cleared and that's it).
 * Don't get confused: RFLAGS saving+masking depends on Long Mode Active bit
 * (EFER.LMA=1), NOT on bitness of userspace where SYSCALL executes
 * or target CS descriptor's L bit (SYSCALL does not read segment descriptors).
 *
 * Arguments:
 * eax  system call number
 * ecx  return address
 * ebx  arg1
 * ebp  arg2	(note: not saved in the stack frame, should not be touched)
 * edx  arg3
 * esi  arg4
 * edi  arg5
 * esp  user stack
 * 0(%esp) arg6
 */
ENTRY(entry_SYSCALL_compat)
	/* Interrupts are off on entry. */
	swapgs

	/* Stash user ESP */
	movl	%esp, %r8d

	/* Use %rsp as scratch reg. User ESP is stashed in r8 */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp

	/* Switch to the kernel stack */
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	/* Construct struct pt_regs on stack */
	pushq	$__USER32_DS		/* pt_regs->ss */
	pushq	%r8			/* pt_regs->sp */
	pushq	%r11			/* pt_regs->flags */
	pushq	$__USER32_CS		/* pt_regs->cs */
	pushq	%rcx			/* pt_regs->ip */
GLOBAL(entry_SYSCALL_compat_after_hwframe)
	movl	%eax, %eax		/* discard orig_ax high bits */
	pushq	%rax			/* pt_regs->orig_ax */
	pushq	%rdi			/* pt_regs->di */
	pushq	%rsi			/* pt_regs->si */
	xorl	%esi, %esi		/* nospec   si */
	pushq	%rdx			/* pt_regs->dx */
	xorl	%edx, %edx		/* nospec   dx */
	pushq	%rbp			/* pt_regs->cx (stashed in bp) */
	xorl	%ecx, %ecx		/* nospec   cx */
	pushq	$-ENOSYS		/* pt_regs->ax */
	pushq   $0			/* pt_regs->r8  = 0 */
	xorl	%r8d, %r8d		/* nospec   r8 */
	pushq   $0			/* pt_regs->r9  = 0 */
	xorl	%r9d, %r9d		/* nospec   r9 */
	pushq   $0			/* pt_regs->r10 = 0 */
	xorl	%r10d, %r10d		/* nospec   r10 */
	pushq   $0			/* pt_regs->r11 = 0 */
	xorl	%r11d, %r11d		/* nospec   r11 */
	pushq   %rbx                    /* pt_regs->rbx */
	xorl	%ebx, %ebx		/* nospec   rbx */
	pushq   %rbp                    /* pt_regs->rbp (will be overwritten) */
	xorl	%ebp, %ebp		/* nospec   rbp */
	pushq   $0			/* pt_regs->r12 = 0 */
	xorl	%r12d, %r12d		/* nospec   r12 */
	pushq   $0			/* pt_regs->r13 = 0 */
	xorl	%r13d, %r13d		/* nospec   r13 */
	pushq   $0			/* pt_regs->r14 = 0 */
	xorl	%r14d, %r14d		/* nospec   r14 */
	pushq   $0			/* pt_regs->r15 = 0 */
	xorl	%r15d, %r15d		/* nospec   r15 */

	/*
	 * User mode is traced as though IRQs are on, and SYSCALL
	 * turned them off.
	 */
	TRACE_IRQS_OFF

	movq	%rsp, %rdi
	call	do_fast_syscall_32
	/* XEN PV guests always use IRET path */
	ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
		    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV

	/* Opportunistic SYSRET */
sysret32_from_system_call:
	TRACE_IRQS_ON			/* User mode traces as IRQs on. */
	movq	RBX(%rsp), %rbx		/* pt_regs->rbx */
	movq	RBP(%rsp), %rbp		/* pt_regs->rbp */
	movq	EFLAGS(%rsp), %r11	/* pt_regs->flags (in r11) */
	movq	RIP(%rsp), %rcx		/* pt_regs->ip (in rcx) */
	addq	$RAX, %rsp		/* Skip r8-r15 */
	popq	%rax			/* pt_regs->rax */
	popq	%rdx			/* Skip pt_regs->cx */
	popq	%rdx			/* pt_regs->dx */
	popq	%rsi			/* pt_regs->si */
	popq	%rdi			/* pt_regs->di */

	/*
	 * USERGS_SYSRET32 does:
	 *  GSBASE = user's GS base
	 *  EIP = ECX
	 *  RFLAGS = R11
	 *  CS = __USER32_CS
	 *  SS = __USER_DS
	 *
	 * ECX will not match pt_regs->cx, but we're returning to a vDSO
	 * trampoline that will fix up RCX, so this is okay.
	 *
	 * R12-R15 are callee-saved, so they contain whatever was in them
	 * when the system call started, which is already known to user
	 * code.  We zero R8-R10 to avoid info leaks.
	 */
	movq	RSP-ORIG_RAX(%rsp), %rsp

	/*
	 * The original userspace %rsp (RSP-ORIG_RAX(%rsp)) is stored
	 * on the process stack which is not mapped to userspace and
	 * not readable after we SWITCH_TO_USER_CR3.  Delay the CR3
	 * switch until after the last reference to the process
	 * stack.
	 *
	 * %r8/%r9 are zeroed before the sysret, thus safe to clobber.
	 */
	SWITCH_TO_USER_CR3_NOSTACK scratch_reg=%r8 scratch_reg2=%r9

	xorl	%r8d, %r8d
	xorl	%r9d, %r9d
	xorl	%r10d, %r10d
	swapgs
	sysretl
END(entry_SYSCALL_compat)
/*
 * 32-bit legacy system call entry.
 *
 * 32-bit x86 Linux system calls traditionally used the INT $0x80
 * instruction.  INT $0x80 lands here.
 *
 * This entry point can be used by 32-bit and 64-bit programs to perform
 * 32-bit system calls.  Instances of INT $0x80 can be found inline in
 * various programs and libraries.  It is also used by the vDSO's
 * __kernel_vsyscall fallback for hardware that doesn't support a faster
 * entry method.  Restarted 32-bit system calls also fall back to INT
 * $0x80 regardless of what instruction was originally used to do the
 * system call.
 *
 * This is considered a slow path.  It is not used by most libc
 * implementations on modern hardware except during process startup.
 *
 * Arguments:
 * eax  system call number
 * ebx  arg1
 * ecx  arg2
 * edx  arg3
 * esi  arg4
 * edi  arg5
 * ebp  arg6
 */
ENTRY(entry_INT80_compat)
	/*
	 * Interrupts are off on entry.
	 */
	ASM_CLAC			/* Do this early to minimize exposure */
	SWAPGS

	/*
	 * User tracing code (ptrace or signal handlers) might assume that
	 * the saved RAX contains a 32-bit number when we're invoking a 32-bit
	 * syscall.  Just in case the high bits are nonzero, zero-extend
	 * the syscall number.  (This could almost certainly be deleted
	 * with no ill effects.)
	 */
	movl	%eax, %eax

	/* switch to thread stack expects orig_ax and rdi to be pushed */
	pushq	%rax			/* pt_regs->orig_ax */
	pushq	%rdi			/* pt_regs->di */

	/* Need to switch before accessing the thread stack. */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
	/* In the Xen PV case we already run on the thread stack. */
	ALTERNATIVE "movq %rsp, %rdi", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	pushq	6*8(%rdi)		/* regs->ss */
	pushq	5*8(%rdi)		/* regs->rsp */
	pushq	4*8(%rdi)		/* regs->eflags */
	pushq	3*8(%rdi)		/* regs->cs */
	pushq	2*8(%rdi)		/* regs->ip */
	pushq	1*8(%rdi)		/* regs->orig_ax */
	pushq	(%rdi)			/* pt_regs->di */
.Lint80_keep_stack:

	pushq	%rsi			/* pt_regs->si */
	xorl	%esi, %esi		/* nospec   si */
	pushq	%rdx			/* pt_regs->dx */
	xorl	%edx, %edx		/* nospec   dx */
	pushq	%rcx			/* pt_regs->cx */
	xorl	%ecx, %ecx		/* nospec   cx */
	pushq	$-ENOSYS		/* pt_regs->ax */
	pushq   %r8			/* pt_regs->r8 */
	xorl	%r8d, %r8d		/* nospec   r8 */
	pushq   %r9			/* pt_regs->r9 */
	xorl	%r9d, %r9d		/* nospec   r9 */
	pushq   %r10			/* pt_regs->r10 */
	xorl	%r10d, %r10d		/* nospec   r10 */
	pushq   %r11			/* pt_regs->r11 */
	xorl	%r11d, %r11d		/* nospec   r11 */
	pushq   %rbx                    /* pt_regs->rbx */
	xorl	%ebx, %ebx		/* nospec   rbx */
	pushq   %rbp                    /* pt_regs->rbp */
	xorl	%ebp, %ebp		/* nospec   rbp */
	pushq   %r12                    /* pt_regs->r12 */
	xorl	%r12d, %r12d		/* nospec   r12 */
	pushq   %r13                    /* pt_regs->r13 */
	xorl	%r13d, %r13d		/* nospec   r13 */
	pushq   %r14                    /* pt_regs->r14 */
	xorl	%r14d, %r14d		/* nospec   r14 */
	pushq   %r15                    /* pt_regs->r15 */
	xorl	%r15d, %r15d		/* nospec   r15 */
	cld

	/*
	 * User mode is traced as though IRQs are on, and the interrupt
	 * gate turned them off.
	 */
	TRACE_IRQS_OFF

	movq	%rsp, %rdi
	call	do_int80_syscall_32
.Lsyscall_32_done:

	/* Go back to user mode. */
	TRACE_IRQS_ON
	jmp	swapgs_restore_regs_and_return_to_usermode
END(entry_INT80_compat)