/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _ASM_X86_FPU_INTERNAL_H
#define _ASM_X86_FPU_INTERNAL_H

#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/user.h>
#include <asm/fpu/api.h>
#include <asm/fpu/xstate.h>
#include <asm/fpu/xcr.h>
#include <asm/cpufeature.h>
#include <asm/trace/fpu.h>

/*
 * High level FPU state handling functions:
 */
extern void fpu__prepare_read(struct fpu *fpu);
extern void fpu__prepare_write(struct fpu *fpu);
extern void fpu__save(struct fpu *fpu);
extern int  fpu__restore_sig(void __user *buf, int ia32_frame);
extern void fpu__drop(struct fpu *fpu);
extern int  fpu__copy(struct task_struct *dst, struct task_struct *src);
extern void fpu__clear_user_states(struct fpu *fpu);
extern void fpu__clear_all(struct fpu *fpu);
extern int  fpu__exception_code(struct fpu *fpu, int trap_nr);

/*
 * Boot time FPU initialization functions:
 */
extern void fpu__init_cpu(void);
extern void fpu__init_system_xstate(void);
extern void fpu__init_cpu_xstate(void);
extern void fpu__init_system(struct cpuinfo_x86 *c);
extern void fpu__init_check_bugs(void);
extern void fpu__resume_cpu(void);
extern u64 fpu__get_supported_xfeatures_mask(void);

/*
 * Debugging facility:
 */
#ifdef CONFIG_X86_DEBUG_FPU
# define WARN_ON_FPU(x) WARN_ON_ONCE(x)
#else
# define WARN_ON_FPU(x) ({ (void)(x); 0; })
#endif

/*
 * FPU related CPU feature flag helper routines:
 */
static __always_inline __pure bool use_xsaveopt(void)
{
	return static_cpu_has(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
	return static_cpu_has(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
	return static_cpu_has(X86_FEATURE_FXSR);
}

/*
 * fpstate handling functions:
 */

extern union fpregs_state init_fpstate;

extern void fpstate_init(union fpregs_state *state);
#ifdef CONFIG_MATH_EMULATION
extern void fpstate_init_soft(struct swregs_state *soft);
#else
static inline void fpstate_init_soft(struct swregs_state *soft) {}
#endif

static inline void fpstate_init_xstate(struct xregs_state *xsave)
{
	/*
	 * XRSTORS requires these bits set in xcomp_bv, or it will
	 * trigger #GP:
	 */
	xsave->header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT | xfeatures_mask_all;
}

static inline void fpstate_init_fxstate(struct fxregs_state *fx)
{
	fx->cwd = 0x37f;
	fx->mxcsr = MXCSR_DEFAULT;
}
extern void fpstate_sanitize_xstate(struct fpu *fpu);

#define user_insn(insn, output, input...)				\
({									\
	int err;							\
									\
	might_fault();							\
									\
	asm volatile(ASM_STAC "\n"					\
		     "1:" #insn "\n\t"					\
		     "2: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "3:  movl $-1,%[err]\n"				\
		     "    jmp  2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})

#define kernel_insn_err(insn, output, input...)				\
({									\
	int err;							\
	asm volatile("1:" #insn "\n\t"					\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:  movl $-1,%[err]\n"				\
		     "    jmp  2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})

#define kernel_insn(insn, output, input...)				\
	asm volatile("1:" #insn "\n\t"					\
		     "2:\n"						\
		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_fprestore)	\
		     : output : input)

static inline int copy_fregs_to_user(struct fregs_state __user *fx)
{
	return user_insn(fnsave %[fx]; fwait,  [fx] "=m" (*fx), "m" (*fx));
}

static inline int copy_fxregs_to_user(struct fxregs_state __user *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
	else
		return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));
}

static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		kernel_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else
		kernel_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int copy_kernel_to_fxregs_err(struct fxregs_state *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		return kernel_insn_err(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else
		return kernel_insn_err(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else
		return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline void copy_kernel_to_fregs(struct fregs_state *fx)
{
	kernel_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int copy_kernel_to_fregs_err(struct fregs_state *fx)
{
	return kernel_insn_err(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int copy_user_to_fregs(struct fregs_state __user *fx)
{
	return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline void copy_fxregs_to_kernel(struct fpu *fpu)
{
	if (IS_ENABLED(CONFIG_X86_32))
		asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state.fxsave));
	else
		asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
}

/* These macros all use (%edi)/(%rdi) as the single memory argument. */
#define XSAVE		".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT	".byte " REX_PREFIX "0x0f,0xae,0x37"
#define XSAVES		".byte " REX_PREFIX "0x0f,0xc7,0x2f"
#define XRSTOR		".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS		".byte " REX_PREFIX "0x0f,0xc7,0x1f"

#define XSTATE_OP(op, st, lmask, hmask, err)				\
	asm volatile("1:" op "\n\t"					\
		     "xor %[err], %[err]\n"				\
		     "2:\n\t"						\
		     ".pushsection .fixup,\"ax\"\n\t"			\
		     "3: movl $-2,%[err]\n\t"				\
		     "jmp 2b\n\t"					\
		     ".popsection\n\t"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * If XSAVES is enabled, it replaces XSAVEOPT because it supports a compact
 * format and supervisor states in addition to modified optimization in
 * XSAVEOPT.
 *
 * Otherwise, if XSAVEOPT is enabled, XSAVEOPT replaces XSAVE because XSAVEOPT
 * supports modified optimization which is not supported by XSAVE.
 *
 * We use XSAVE as a fallback.
 *
 * The 661 label is defined in the ALTERNATIVE* macros as the address of the
 * original instruction which gets replaced. We need to use it here as the
 * address of the instruction at which we might get an exception.
 */
#define XSTATE_XSAVE(st, lmask, hmask, err)				\
	asm volatile(ALTERNATIVE_2(XSAVE,				\
				   XSAVEOPT, X86_FEATURE_XSAVEOPT,	\
				   XSAVES,   X86_FEATURE_XSAVES)	\
		     "\n"						\
		     "xor %[err], %[err]\n"				\
		     "3:\n"						\
		     ".pushsection .fixup,\"ax\"\n"			\
		     "4: movl $-2, %[err]\n"				\
		     "jmp 3b\n"						\
		     ".popsection\n"					\
		     _ASM_EXTABLE(661b, 4b)				\
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact
 * XSAVE area format.
 */
#define XSTATE_XRESTORE(st, lmask, hmask)				\
	asm volatile(ALTERNATIVE(XRSTOR,				\
				 XRSTORS, X86_FEATURE_XSAVES)		\
		     "\n"						\
		     "3:\n"						\
		     _ASM_EXTABLE_HANDLE(661b, 3b, ex_handler_fprestore)\
		     :							\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * This function is called only during boot time when x86 caps are not set
 * up and alternatives cannot be used yet.
 */
static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
{
	u64 mask = xfeatures_mask_all;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON(system_state != SYSTEM_BOOTING);

	if (boot_cpu_has(X86_FEATURE_XSAVES))
		XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XSAVE, xstate, lmask, hmask, err);

	/* We should never fault when copying to a kernel buffer: */
	WARN_ON_FPU(err);
}

/*
 * This function is called only during boot time when x86 caps are not set
 * up and alternatives cannot be used yet.
 */
static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
{
	u64 mask = -1;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON(system_state != SYSTEM_BOOTING);

	if (boot_cpu_has(X86_FEATURE_XSAVES))
		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

	/*
	 * We should never fault when copying from a kernel buffer, and the FPU
	 * state we set at boot time should be valid.
	 */
	WARN_ON_FPU(err);
}

/*
 * Save processor xstate to xsave area.
 */
static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
{
	u64 mask = xfeatures_mask_all;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON_FPU(!alternatives_patched);

	XSTATE_XSAVE(xstate, lmask, hmask, err);

	/* We should never fault when copying to a kernel buffer: */
	WARN_ON_FPU(err);
}

/*
 * Restore processor xstate from xsave area.
 */
static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	XSTATE_XRESTORE(xstate, lmask, hmask);
}

/*
 * Save xstate to user space xsave area.
 *
 * We don't use modified optimization because xrstor/xrstors might track
 * a different application.
 *
 * We don't use the compacted xsave area format, for backward
 * compatibility with old applications which don't understand the
 * compacted format.
 */
static inline int copy_xregs_to_user(struct xregs_state __user *buf)
{
	u64 mask = xfeatures_mask_user();
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	/*
	 * Clear the xsave header first, so that reserved fields are
	 * initialized to zero.
	 */
	err = __clear_user(&buf->header, sizeof(buf->header));
	if (unlikely(err))
		return -EFAULT;

	stac();
	XSTATE_OP(XSAVE, buf, lmask, hmask, err);
	clac();

	return err;
}

/*
 * Restore xstate from user space xsave area.
 */
static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
{
	struct xregs_state *xstate = ((__force struct xregs_state *)buf);
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	stac();
	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
	clac();

	return err;
}

/*
 * Restore xstate from kernel space xsave area, return an error code instead of
 * an exception.
 */
static inline int copy_kernel_to_xregs_err(struct xregs_state *xstate, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	if (static_cpu_has(X86_FEATURE_XSAVES))
		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

	return err;
}

extern int copy_fpregs_to_fpstate(struct fpu *fpu);

static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate, u64 mask)
{
	if (use_xsave()) {
		copy_kernel_to_xregs(&fpstate->xsave, mask);
	} else {
		if (use_fxsr())
			copy_kernel_to_fxregs(&fpstate->fxsave);
		else
			copy_kernel_to_fregs(&fpstate->fsave);
	}
}

static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
{
	/*
	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
	 * pending. Clear the x87 state here by setting it to fixed values.
	 * "m" is a random variable that should be in L1.
	 */
	if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) {
		asm volatile(
			"fnclex\n\t"
			"emms\n\t"
			"fildl %P[addr]"	/* set F?P to defined value */
			: : [addr] "m" (fpstate));
	}

	__copy_kernel_to_fpregs(fpstate, -1);
}

extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);

/*
 * FPU context switch related helper methods:
 */

DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

/*
 * The in-register FPU state for an FPU context on a CPU is assumed to be
 * valid if the fpu->last_cpu matches the CPU, and the fpu_fpregs_owner_ctx
 * matches the FPU.
 *
 * If the FPU register state is valid, the kernel can skip restoring the
 * FPU state from memory.
 *
 * Any code that clobbers the FPU registers or updates the in-memory
 * FPU state for a task MUST let the rest of the kernel know that the
 * FPU registers are no longer valid for this task.
 *
 * Either one of these invalidation functions is enough. Invalidate
 * a resource you control: CPU if using the CPU for something else
 * (with preemption disabled), FPU for the current task, or a task that
 * is prevented from running by the current task.
 */
static inline void __cpu_invalidate_fpregs_state(void)
{
	__this_cpu_write(fpu_fpregs_owner_ctx, NULL);
}

static inline void __fpu_invalidate_fpregs_state(struct fpu *fpu)
{
	fpu->last_cpu = -1;
}

static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu)
{
	return fpu == this_cpu_read(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
}
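/*
 * Illustrative sketch only, not a definition used by this header: kernel
 * code that borrows the FPU registers with preemption disabled (e.g. the
 * kernel_fpu_begin() path in arch/x86/kernel/fpu/core.c) is expected to
 * save current's register state and then invalidate the per-CPU owner
 * before clobbering the registers, roughly:
 *
 *	if (!(current->flags & PF_KTHREAD) &&
 *	    !test_thread_flag(TIF_NEED_FPU_LOAD)) {
 *		set_thread_flag(TIF_NEED_FPU_LOAD);
 *		copy_fpregs_to_fpstate(&current->thread.fpu);
 *	}
 *	__cpu_invalidate_fpregs_state();
 */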

/*
 * These generally need preemption protection to work;
 * try to avoid using them on their own:
 */
static inline void fpregs_deactivate(struct fpu *fpu)
{
	this_cpu_write(fpu_fpregs_owner_ctx, NULL);
	trace_x86_fpu_regs_deactivated(fpu);
}

static inline void fpregs_activate(struct fpu *fpu)
{
	this_cpu_write(fpu_fpregs_owner_ctx, fpu);
	trace_x86_fpu_regs_activated(fpu);
}

/*
 * Internal helper, do not use directly. Use switch_fpu_return() instead.
 */
static inline void __fpregs_load_activate(void)
{
	struct fpu *fpu = &current->thread.fpu;
	int cpu = smp_processor_id();

	if (WARN_ON_ONCE(current->flags & PF_KTHREAD))
		return;

	if (!fpregs_state_valid(fpu, cpu)) {
		copy_kernel_to_fpregs(&fpu->state);
		fpregs_activate(fpu);
		fpu->last_cpu = cpu;
	}
	clear_thread_flag(TIF_NEED_FPU_LOAD);
}

/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state.
 *    This is done within the context of the old process.
 *
 *  - switch_fpu_finish() sets TIF_NEED_FPU_LOAD; the floating point state
 *    will get loaded on return to userspace, or when the kernel needs it.
 *
 * If TIF_NEED_FPU_LOAD is cleared then the CPU's FPU registers
 * are saved in the current thread's FPU register state.
 *
 * If TIF_NEED_FPU_LOAD is set then the CPU's FPU registers may not
 * hold current()'s FPU registers. They must be loaded before
 * returning to userland or before their contents are used.
 *
 * The FPU context is only stored/restored for a user task and
 * PF_KTHREAD is used to distinguish between kernel and user threads.
 */
static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
{
	if (static_cpu_has(X86_FEATURE_FPU) && !(current->flags & PF_KTHREAD)) {
		if (!copy_fpregs_to_fpstate(old_fpu))
			old_fpu->last_cpu = -1;
		else
			old_fpu->last_cpu = cpu;

		/* But leave fpu_fpregs_owner_ctx! */
		trace_x86_fpu_regs_deactivated(old_fpu);
	}
}

/*
 * Misc helper functions:
 */

/*
 * Load PKRU from the FPU context if available. Delay loading of the
 * complete FPU state until the return to userland.
 */
static inline void switch_fpu_finish(struct fpu *new_fpu)
{
	u32 pkru_val = init_pkru_value;
	struct pkru_state *pk;

	if (!static_cpu_has(X86_FEATURE_FPU))
		return;

	set_thread_flag(TIF_NEED_FPU_LOAD);

	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
		return;

	/*
	 * PKRU state is switched eagerly because it needs to be valid before we
	 * return to userland e.g. for a copy_to_user() operation.
	 */
	if (!(current->flags & PF_KTHREAD)) {
		pk = get_xsave_addr(&new_fpu->state.xsave, XFEATURE_PKRU);
		if (pk)
			pkru_val = pk->pkru;
	}
	__write_pkru(pkru_val);
}
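/*
 * Illustrative sketch only, not a definition used by this header: the
 * scheduler side is expected to pair the two helpers above roughly like
 * this (simplified from the __switch_to() call sites in
 * arch/x86/kernel/process_32.c and process_64.c):
 *
 *	struct fpu *prev_fpu = &prev_p->thread.fpu;
 *	struct fpu *next_fpu = &next_p->thread.fpu;
 *	int cpu = smp_processor_id();
 *
 *	switch_fpu_prepare(prev_fpu, cpu);
 *	...				(switch stacks, segments, TLS, ...)
 *	switch_fpu_finish(next_fpu);
 */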

#endif /* _ASM_X86_FPU_INTERNAL_H */