/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/jump_label.h>
#include <asm/unwind_hints.h>
#include <asm/cpufeatures.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>

/*

 x86 function call convention, 64-bit:
 -------------------------------------
  arguments           |  callee-saved      | extra caller-saved | return
 [callee-clobbered]   |                    | [callee-clobbered] |
 ---------------------------------------------------------------------------
 rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11             | rax, rdx [**]

 ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
   functions when it sees tail-call optimization possibilities) rflags is
   clobbered. Leftover arguments are passed over the stack frame.)

 [*]  In the frame-pointers case rbp is fixed to the stack frame.

 [**] for struct return values wider than 64 bits the return convention is a
      bit more complex: up to 128 bits width we return small structures
      straight in rax, rdx. For structures larger than that (3 words or
      larger) the caller puts a pointer to an on-stack return struct
      [allocated in the caller's stack frame] into the first argument - i.e.
      into rdi. All other arguments shift up by one in this case.
      Fortunately this case is rare in the kernel.

For 32-bit we have the following conventions - kernel is built with
-mregparm=3 and -freg-struct-return:

 x86 function calling convention, 32-bit:
 ----------------------------------------
  arguments         | callee-saved        | extra caller-saved | return
 [callee-clobbered] |                     | [callee-clobbered] |
 -------------------------------------------------------------------------
 eax edx ecx        | ebx edi esi ebp [*] | <none>             | eax, edx [**]

 ( here too esp is obviously invariant across normal function calls. eflags
   is clobbered. Leftover arguments are passed over the stack frame. )

 [*]  In the frame-pointers case ebp is fixed to the stack frame.

 [**] We build with -freg-struct-return, which on 32-bit means similar
      semantics as on 64-bit: edx can be used for a second return value
      (i.e. covering integer and structure sizes up to 64 bits) - after that
      it gets more complex and more expensive: 3-word or larger struct returns
      get done in the caller's frame and the pointer to the return struct goes
      into regparm0, i.e. eax - the other arguments shift up and the
      function's register parameters degenerate to regparm=2 in essence.

*/
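
/*
 * Illustration (a sketch, not part of this file): under the 64-bit rules
 * above,
 *
 *	struct pair { long a, b; };		// 128 bits
 *	struct trio { long a, b, c; };		// 3 words
 *
 *	struct pair f(long x, long y);		// x: rdi, y: rsi; ret: rax:rdx
 *	struct trio g(long x, long y);		// &ret: rdi; x: rsi, y: rdx
 *
 * f() returns its value directly in rax:rdx, while g()'s caller passes a
 * pointer to an on-stack return slot in rdi and the explicit arguments
 * shift up by one register.
 */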

#ifdef CONFIG_X86_64

/*
 * 64-bit system call stack frame layout defines and helpers,
 * for assembly code:
 */

/* The layout forms the "struct pt_regs" on the stack: */
/*
 * C ABI says these regs are callee-preserved. They aren't saved on kernel entry
 * unless syscall needs a complete, fully filled "struct pt_regs".
 */
#define R15		0*8
#define R14		1*8
#define R13		2*8
#define R12		3*8
#define RBP		4*8
#define RBX		5*8
/* These regs are callee-clobbered. Always saved on kernel entry. */
#define R11		6*8
#define R10		7*8
#define R9		8*8
#define R8		9*8
#define RAX		10*8
#define RCX		11*8
#define RDX		12*8
#define RSI		13*8
#define RDI		14*8
/*
 * On syscall entry, this is syscall#. On CPU exception, this is error code.
 * On hw interrupt, it's IRQ number:
 */
#define ORIG_RAX	15*8
/* Return frame for iretq */
#define RIP		16*8
#define CS		17*8
#define EFLAGS		18*8
#define RSP		19*8
#define SS		20*8

#define SIZEOF_PTREGS	21*8
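
/*
 * Illustrative use of the offsets above (not code from this file): with a
 * full pt_regs frame at the top of the stack, entry code can address each
 * saved register relative to %rsp, e.g.:
 *
 *	movq	ORIG_RAX(%rsp), %rax	# syscall nr / error code / IRQ number
 *	movq	RIP(%rsp), %rcx		# saved return address
 */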

.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0
	/*
	 * Push registers and sanitize registers of values that a
	 * speculation attack might otherwise want to exploit. The
	 * lower registers are likely clobbered well before they
	 * could be put to use in a speculative execution gadget.
	 * Interleave XOR with PUSH for better uop scheduling:
	 */
	.if \save_ret
	pushq	%rsi		/* pt_regs->si */
	movq	8(%rsp), %rsi	/* temporarily store the return address in %rsi */
	movq	%rdi, 8(%rsp)	/* pt_regs->di (overwriting original return address) */
	.else
	pushq	%rdi		/* pt_regs->di */
	pushq	%rsi		/* pt_regs->si */
	.endif
	pushq	\rdx		/* pt_regs->dx */
	xorl	%edx, %edx	/* nospec dx */
	pushq	%rcx		/* pt_regs->cx */
	xorl	%ecx, %ecx	/* nospec cx */
	pushq	\rax		/* pt_regs->ax */
	pushq	%r8		/* pt_regs->r8 */
	xorl	%r8d, %r8d	/* nospec r8 */
	pushq	%r9		/* pt_regs->r9 */
	xorl	%r9d, %r9d	/* nospec r9 */
	pushq	%r10		/* pt_regs->r10 */
	xorl	%r10d, %r10d	/* nospec r10 */
	pushq	%r11		/* pt_regs->r11 */
	xorl	%r11d, %r11d	/* nospec r11 */
	pushq	%rbx		/* pt_regs->rbx */
	xorl	%ebx, %ebx	/* nospec rbx */
	pushq	%rbp		/* pt_regs->rbp */
	xorl	%ebp, %ebp	/* nospec rbp */
	pushq	%r12		/* pt_regs->r12 */
	xorl	%r12d, %r12d	/* nospec r12 */
	pushq	%r13		/* pt_regs->r13 */
	xorl	%r13d, %r13d	/* nospec r13 */
	pushq	%r14		/* pt_regs->r14 */
	xorl	%r14d, %r14d	/* nospec r14 */
	pushq	%r15		/* pt_regs->r15 */
	xorl	%r15d, %r15d	/* nospec r15 */
	UNWIND_HINT_REGS
	.if \save_ret
	pushq	%rsi		/* return address on top of stack */
	.endif
.endm
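
/*
 * Usage sketch (hypothetical call site, not from this file):
 *
 *	PUSH_AND_CLEAR_REGS save_ret=1
 *
 * builds the full pt_regs frame while keeping the on-stack return address
 * usable; the interleaved XORs ensure no stale, possibly user-controlled
 * register values survive into kernel code for a speculation gadget to use.
 */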

.macro POP_REGS pop_rdi=1 skip_r11rcx=0
	popq %r15
	popq %r14
	popq %r13
	popq %r12
	popq %rbp
	popq %rbx
	.if \skip_r11rcx
	popq %rsi
	.else
	popq %r11
	.endif
	popq %r10
	popq %r9
	popq %r8
	popq %rax
	.if \skip_r11rcx
	popq %rsi
	.else
	popq %rcx
	.endif
	popq %rdx
	popq %rsi
	.if \pop_rdi
	popq %rdi
	.endif
.endm
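
/*
 * Usage note (sketch): skip_r11rcx=1 suits a sysret-style exit path, where
 * rcx and r11 are reloaded from the saved RIP and EFLAGS anyway; the two
 * popq %rsi instructions merely discard those pt_regs slots.
 */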

/*
 * This is a sneaky trick to help the unwinder find pt_regs on the stack.  The
 * frame pointer is replaced with an encoded pointer to pt_regs.  The encoding
 * is just setting the LSB, which makes it an invalid stack address and is also
 * a signal to the unwinder that it's a pt_regs pointer in disguise.
 *
 * NOTE: This macro must be used *after* PUSH_AND_CLEAR_REGS because it corrupts
 * the original rbp.
 */
.macro ENCODE_FRAME_POINTER ptregs_offset=0
#ifdef CONFIG_FRAME_POINTER
	leaq 1+\ptregs_offset(%rsp), %rbp
#endif
.endm
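
/*
 * For illustration (not code from this file), the unwinder can undo the
 * encoding by testing and clearing the low bit:
 *
 *	if (rbp & 0x1)
 *		regs = (struct pt_regs *)(rbp & ~0x1UL);
 */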

#ifdef CONFIG_PAGE_TABLE_ISOLATION

/*
 * PAGE_TABLE_ISOLATION PGDs are 8k.  Flip bit 12 to switch between the two
 * halves:
 */
#define PTI_USER_PGTABLE_BIT		PAGE_SHIFT
#define PTI_USER_PGTABLE_MASK		(1 << PTI_USER_PGTABLE_BIT)
#define PTI_USER_PCID_BIT		X86_CR3_PTI_PCID_USER_BIT
#define PTI_USER_PCID_MASK		(1 << PTI_USER_PCID_BIT)
#define PTI_USER_PGTABLE_AND_PCID_MASK  (PTI_USER_PCID_MASK | PTI_USER_PGTABLE_MASK)
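
/*
 * Worked example (assuming 4k pages, PAGE_SHIFT == 12): if the kernel-mode
 * PGD sits at physical address X, the user-mode PGD sits at X + 4096, so
 * flipping bit 12 of CR3 switches between the two halves.
 */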

.macro SET_NOFLUSH_BIT	reg:req
	bts	$X86_CR3_PCID_NOFLUSH_BIT, \reg
.endm

.macro ADJUST_KERNEL_CR3 reg:req
	ALTERNATIVE "", "SET_NOFLUSH_BIT \reg", X86_FEATURE_PCID
	/* Clear PCID and "PAGE_TABLE_ISOLATION bit", point CR3 at kernel pagetables: */
	andq    $(~PTI_USER_PGTABLE_AND_PCID_MASK), \reg
.endm

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	mov	%cr3, \scratch_reg
	ADJUST_KERNEL_CR3 \scratch_reg
	mov	\scratch_reg, %cr3
.Lend_\@:
.endm
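
/*
 * Usage sketch (the scratch register choice is hypothetical): early in an
 * entry path, before touching kernel data the user page tables do not map:
 *
 *	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
 *
 * The first ALTERNATIVE skips the body entirely when X86_FEATURE_PTI is
 * not set.
 */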

#define THIS_CPU_user_pcid_flush_mask   \
	PER_CPU_VAR(cpu_tlbstate) + TLB_STATE_user_pcid_flush_mask

.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	mov	%cr3, \scratch_reg

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * Test if the ASID needs a flush.
	 */
	movq	\scratch_reg, \scratch_reg2
	andq	$(0x7FF), \scratch_reg		/* mask ASID */
	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jnc	.Lnoflush_\@

	/* Flush needed, clear the bit */
	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	movq	\scratch_reg2, \scratch_reg
	jmp	.Lwrcr3_pcid_\@

.Lnoflush_\@:
	movq	\scratch_reg2, \scratch_reg
	SET_NOFLUSH_BIT \scratch_reg

.Lwrcr3_pcid_\@:
	/* Flip the ASID to the user version */
	orq	$(PTI_USER_PCID_MASK), \scratch_reg

.Lwrcr3_\@:
	/* Flip the PGD to the user version */
	orq     $(PTI_USER_PGTABLE_MASK), \scratch_reg
	mov	\scratch_reg, %cr3
.Lend_\@:
.endm

.macro SWITCH_TO_USER_CR3_STACK	scratch_reg:req
	pushq	%rax
	SWITCH_TO_USER_CR3_NOSTACK scratch_reg=\scratch_reg scratch_reg2=%rax
	popq	%rax
.endm
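
/*
 * The _STACK variant is for exit paths with only one free register: it
 * borrows %rax as the second scratch by spilling it to the stack, which
 * therefore must remain mapped across the CR3 switch.
 */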

.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Ldone_\@", "", X86_FEATURE_PTI
	movq	%cr3, \scratch_reg
	movq	\scratch_reg, \save_reg
	/*
	 * Test the user pagetable bit. If set, then the user page tables
	 * are active. If clear, CR3 already has the kernel page tables
	 * active.
	 */
	bt	$PTI_USER_PGTABLE_BIT, \scratch_reg
	jnc	.Ldone_\@

	ADJUST_KERNEL_CR3 \scratch_reg
	movq	\scratch_reg, %cr3

.Ldone_\@:
.endm

.macro RESTORE_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * KERNEL pages can always resume with NOFLUSH as we do
	 * explicit flushes.
	 */
	bt	$PTI_USER_PGTABLE_BIT, \save_reg
	jnc	.Lnoflush_\@

	/*
	 * Check if there's a pending flush for the user ASID we're
	 * about to set.
	 */
	movq	\save_reg, \scratch_reg
	andq	$(0x7FF), \scratch_reg
	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jnc	.Lnoflush_\@

	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jmp	.Lwrcr3_\@

.Lnoflush_\@:
	SET_NOFLUSH_BIT \save_reg

.Lwrcr3_\@:
	/*
	 * The CR3 write could be avoided when not changing its value,
	 * but would require a CR3 read *and* a scratch register.
	 */
	movq	\save_reg, %cr3
.Lend_\@:
.endm
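
/*
 * Sketch of the intended pairing (register choices are illustrative):
 *
 *	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
 *	...				# run on kernel page tables
 *	RESTORE_CR3 scratch_reg=%rax save_reg=%r14
 *
 * save_reg must survive untouched between the two macros.
 */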

#else /* CONFIG_PAGE_TABLE_ISOLATION=n: */

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
.endm
.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
.endm
.macro SWITCH_TO_USER_CR3_STACK scratch_reg:req
.endm
.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
.endm
.macro RESTORE_CR3 scratch_reg:req save_reg:req
.endm

#endif

#endif /* CONFIG_X86_64 */

/*
 * This does 'call enter_from_user_mode' unless we can avoid it based on
 * kernel config or using the static jump infrastructure.
 */
.macro CALL_enter_from_user_mode
#ifdef CONFIG_CONTEXT_TRACKING
#ifdef CONFIG_JUMP_LABEL
	STATIC_JUMP_IF_FALSE .Lafter_call_\@, context_tracking_enabled, def=0
#endif
	call enter_from_user_mode
.Lafter_call_\@:
#endif
.endm
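
/*
 * Usage sketch: entry code runs this once pt_regs is saved and kernel CR3
 * is live when arriving from user mode; with CONFIG_JUMP_LABEL the call is
 * patched out while context tracking is disabled.
 */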