xsave.c 18.7 KB
Newer Older
1 2 3 4 5
/*
 * xsave/xrstor support.
 *
 * Author: Suresh Siddha <suresh.b.siddha@intel.com>
 */
6 7 8

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

9 10
#include <linux/bootmem.h>
#include <linux/compat.h>
F
Fenghua Yu 已提交
11
#include <linux/cpu.h>
12
#include <asm/i387.h>
13
#include <asm/fpu-internal.h>
14
#include <asm/sigframe.h>
A
Andy Lutomirski 已提交
15
#include <asm/tlbflush.h>
16
#include <asm/xcr.h>
17 18 19 20

/*
 * Supported feature mask by the CPU and the kernel.
 */
21
u64 pcntxt_mask;
22

23 24 25
/*
 * Represents init state for the supported extended state.
 */
26
struct xsave_struct *init_xstate_buf;
27

28
static struct _fpx_sw_bytes fx_sw_reserved, fx_sw_reserved_ia32;
F
Fenghua Yu 已提交
29
static unsigned int *xstate_offsets, *xstate_sizes;
30
static unsigned int xstate_comp_offsets[sizeof(pcntxt_mask)*8];
F
Fenghua Yu 已提交
31
static unsigned int xstate_features;
32

33 34 35 36 37 38 39 40 41 42 43 44 45
/*
 * If a processor implementation discerns that a processor state component is
 * in its initialized state it may modify the corresponding bit in the
 * xsave_hdr.xstate_bv as '0', without modifying the corresponding memory
 * layout in the case of xsaveopt. While presenting the xstate information to
 * the user, we always ensure that the memory layout of a feature will be in
 * the init state if the corresponding header bit is zero. This is to ensure
 * that the user doesn't see some stale state in the memory layout during
 * signal handling, debugging etc.
 */
void __sanitize_i387_state(struct task_struct *tsk)
{
	struct i387_fxsave_struct *fx = &tsk->thread.fpu.state->fxsave;
	/* Bit 0 is FP, bit 1 is SSE; the extended features start at bit 2. */
	int feature_bit = 0x2;
	u64 xstate_bv;

	if (!fx)
		return;

	xstate_bv = tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv;

	/*
	 * None of the feature bits are in init state. So nothing else
	 * to do for us, as the memory layout is up to date.
	 */
	if ((xstate_bv & pcntxt_mask) == pcntxt_mask)
		return;

	/*
	 * FP is in init state
	 */
	if (!(xstate_bv & XSTATE_FP)) {
		fx->cwd = 0x37f;
		fx->swd = 0;
		fx->twd = 0;
		fx->fop = 0;
		fx->rip = 0;
		fx->rdp = 0;
		memset(&fx->st_space[0], 0, 128);
	}

	/*
	 * SSE is in init state
	 */
	if (!(xstate_bv & XSTATE_SSE))
		memset(&fx->xmm_space[0], 0, 256);

	/* Keep only features that are enabled but marked init (bit clear). */
	xstate_bv = (pcntxt_mask & ~xstate_bv) >> 2;

	/*
	 * Update all the other memory layouts for which the corresponding
	 * header bit is in the init state.
	 */
	while (xstate_bv) {
		if (xstate_bv & 0x1) {
			int offset = xstate_offsets[feature_bit];
			int size = xstate_sizes[feature_bit];

			/* Overwrite the stale image with the feature's init state. */
			memcpy(((void *) fx) + offset,
			       ((void *) init_xstate_buf) + offset,
			       size);
		}

		xstate_bv >>= 1;
		feature_bit++;
	}
}

101 102 103 104
/*
 * Check for the presence of extended state information in the
 * user fpstate pointer in the sigcontext.
 *
 * Returns 0 when a valid extended-state layout is found (and *fx_sw is
 * filled in), -1 otherwise.
 */
static inline int check_for_xstate(struct i387_fxsave_struct __user *buf,
				   void __user *fpstate,
				   struct _fpx_sw_bytes *fx_sw)
{
	/* Smallest valid xsave layout: legacy fxsave area + xsave header. */
	int min_xstate_size = sizeof(struct i387_fxsave_struct) +
			      sizeof(struct xsave_hdr_struct);
	unsigned int magic2;

	if (__copy_from_user(fx_sw, &buf->sw_reserved[0], sizeof(*fx_sw)))
		return -1;

	/* Check for the first magic field and other error scenarios. */
	if (fx_sw->magic1 != FP_XSTATE_MAGIC1 ||
	    fx_sw->xstate_size < min_xstate_size ||
	    fx_sw->xstate_size > xstate_size ||
	    fx_sw->xstate_size > fx_sw->extended_size)
		return -1;

	/*
	 * Check for the presence of second magic word at the end of memory
	 * layout. This detects the case where the user just copied the legacy
	 * fpstate layout without copying the extended state information
	 * in the memory layout.
	 */
	if (__get_user(magic2, (__u32 __user *)(fpstate + fx_sw->xstate_size))
	    || magic2 != FP_XSTATE_MAGIC2)
		return -1;

	return 0;
}

136 137 138
/*
 * Signal frame handlers.
 */

/*
 * Write the legacy fsave header of a 32-bit signal frame at 'buf'.
 *
 * With fxsr the i387 state was saved in fxsave format, so it is first
 * converted to the legacy user_i387_ia32 layout; without fxsr only the
 * 'status' word needs to be synced from the just-saved 'swd'.
 *
 * Returns 0 on success, -1 on a user-copy fault.
 */
static inline int save_fsave_header(struct task_struct *tsk, void __user *buf)
{
	if (use_fxsr()) {
		struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave;
		struct user_i387_ia32_struct env;
		struct _fpstate_ia32 __user *fp = buf;

		convert_from_fxsr(&env, tsk);

		if (__copy_to_user(buf, &env, sizeof(env)) ||
		    __put_user(xsave->i387.swd, &fp->status) ||
		    __put_user(X86_FXSR_MAGIC, &fp->magic))
			return -1;
	} else {
		struct i387_fsave_struct __user *fp = buf;
		u32 swd;
		if (__get_user(swd, &fp->swd) || __put_user(swd, &fp->status))
			return -1;
	}

	return 0;
}

/*
 * Finalize the [f]xsave frame at 'buf' for user space: fill in the
 * SW-reserved bytes and, for xsave, append the trailing FP_XSTATE_MAGIC2
 * and force the FP/SSE bits on in the header's xstate_bv.
 *
 * Returns 0 on success, non-zero on a user-copy fault.
 */
static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
{
	struct xsave_struct __user *x = buf;
	struct _fpx_sw_bytes *sw_bytes;
	u32 xstate_bv;
	int err;

	/* Setup the bytes not touched by the [f]xsave and reserved for SW. */
	sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
	err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));

	if (!use_xsave())
		return err;

	/* The second magic word marks the end of the extended-state layout. */
	err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));

	/*
	 * Read the xstate_bv which we copied (directly from the cpu or
	 * from the state in task struct) to the user buffers.
	 */
	err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);

	/*
	 * For legacy compatible, we always set FP/SSE bits in the bit
	 * vector while saving the state to the user context. This will
	 * enable us capturing any changes(during sigreturn) to
	 * the FP/SSE bits by the legacy applications which don't touch
	 * xstate_bv in the xsave header.
	 *
	 * xsave aware apps can change the xstate_bv in the xsave
	 * header as well as change any contents in the memory layout.
	 * xrestore as part of sigreturn will capture all the changes.
	 */
	xstate_bv |= XSTATE_FPSSE;

	err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);

	return err;
}

/*
 * Dump the live FPU register state straight into the user-space buffer,
 * using the richest save instruction the CPU supports. On failure the
 * buffer is wiped so no partially-written data is left behind.
 */
static inline int save_user_xstate(struct xsave_struct __user *buf)
{
	int ret;

	if (use_xsave()) {
		ret = xsave_user(buf);
	} else if (use_fxsr()) {
		ret = fxsave_user((struct i387_fxsave_struct __user *) buf);
	} else {
		ret = fsave_user((struct i387_fsave_struct __user *) buf);
	}

	if (unlikely(ret)) {
		if (__clear_user(buf, xstate_size))
			ret = -EFAULT;
	}

	return ret;
}

/*
 * Save the fpu, extended register state to the user signal frame.
 *
 * 'buf_fx' is the 64-byte aligned pointer at which the [f|fx|x]save
 *  state is copied.
 *  'buf' points to the 'buf_fx' or to the fsave header followed by 'buf_fx'.
 *
 *	buf == buf_fx for 64-bit frames and 32-bit fsave frame.
 *	buf != buf_fx for 32-bit frames with fxstate.
 *
 * If the fpu, extended register state is live, save the state directly
 * to the user frame pointed by the aligned pointer 'buf_fx'. Otherwise,
 * copy the thread's fpu state to the user frame starting at 'buf_fx'.
 *
 * If this is a 32-bit frame with fxstate, put a fsave header before
 * the aligned state at 'buf_fx'.
 *
 * For [f]xsave state, update the SW reserved fields in the [f]xsave frame
 * indicating the absence/presence of the extended state to the user.
 */
int save_xstate_sig(void __user *buf, void __user *buf_fx, int size)
{
	struct xsave_struct *xsave = &current->thread.fpu.state->xsave;
	struct task_struct *tsk = current;
	int ia32_fxstate = (buf != buf_fx);

	/* A split frame only exists for 32-bit or IA32-emulated tasks. */
	ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
			 config_enabled(CONFIG_IA32_EMULATION));

	if (!access_ok(VERIFY_WRITE, buf, size))
		return -EACCES;

	/* No FPU hardware: the math emulator supplies the state. */
	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_get(current, NULL, 0,
			sizeof(struct user_i387_ia32_struct), NULL,
			(struct _fpstate_ia32 __user *) buf) ? -1 : 1;

	if (user_has_fpu()) {
		/* Save the live register state to the user directly. */
		if (save_user_xstate(buf_fx))
			return -1;
		/* Update the thread's fxstate to save the fsave header. */
		if (ia32_fxstate)
			fpu_fxsave(&tsk->thread.fpu);
	} else {
		/* State lives in the thread struct; sanitize it, then copy. */
		sanitize_i387_state(tsk);
		if (__copy_to_user(buf_fx, xsave, xstate_size))
			return -1;
	}

	/* Save the fsave header for the 32-bit frames. */
	if ((ia32_fxstate || !use_fxsr()) && save_fsave_header(tsk, buf))
		return -1;

	if (use_fxsr() && save_xstate_epilog(buf_fx, ia32_fxstate))
		return -1;

	return 0;
}
277

278 279 280 281 282 283 284
/*
 * Scrub the state just copied in from a user signal frame before it can
 * be restored: clamp the xsave header and mxcsr to what the kernel/CPU
 * permit, and (with fxsr) rebuild fxstate from the ia32 env.
 */
static inline void
sanitize_restored_xstate(struct task_struct *tsk,
			 struct user_i387_ia32_struct *ia32_env,
			 u64 xstate_bv, int fx_only)
{
	struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave;
	struct xsave_hdr_struct *xsave_hdr = &xsave->xsave_hdr;

	if (use_xsave()) {
		/* These bits must be zero. */
		memset(xsave_hdr->reserved, 0, 48);

		/*
		 * Init the state that is not present in the memory
		 * layout and not enabled by the OS.
		 */
		if (fx_only)
			xsave_hdr->xstate_bv = XSTATE_FPSSE;
		else
			xsave_hdr->xstate_bv &= (pcntxt_mask & xstate_bv);
	}

	if (use_fxsr()) {
		/*
		 * mxcsr reserved bits must be masked to zero for security
		 * reasons.
		 */
		xsave->i387.mxcsr &= mxcsr_feature_mask;

		convert_to_fxsr(tsk, ia32_env);
	}
}

311
/*
 * Restore the extended state if present. Otherwise, restore the FP/SSE state.
 */
static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
{
	if (use_xsave()) {
		if ((unsigned long)buf % 64 || fx_only) {
			/*
			 * xrstor requires a 64-byte aligned buffer: fall back
			 * to fxrstor and re-init everything beyond FP/SSE.
			 */
			u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
			xrstor_state(init_xstate_buf, init_bv);
			return fxrstor_user(buf);
		} else {
			/*
			 * Init any enabled feature the user frame does not
			 * carry, then restore the ones it does.
			 */
			u64 init_bv = pcntxt_mask & ~xbv;
			if (unlikely(init_bv))
				xrstor_state(init_xstate_buf, init_bv);
			return xrestore_user(buf, xbv);
		}
	} else if (use_fxsr()) {
		return fxrstor_user(buf);
	} else
		return frstor_user(buf);
}

333
/*
 * Restore the FPU/extended state from the user signal frame at 'buf'
 * ('buf_fx' is the 64-byte aligned [f|fx|x]save area inside it).
 *
 * Returns 0 on success, -EACCES/-1 on failure.
 */
int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
{
	int ia32_fxstate = (buf != buf_fx);
	struct task_struct *tsk = current;
	int state_size = xstate_size;
	u64 xstate_bv = 0;
	int fx_only = 0;

	/* A split frame only exists for 32-bit or IA32-emulated tasks. */
	ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
			 config_enabled(CONFIG_IA32_EMULATION));

	if (!buf) {
		/* NULL fpstate in the sigcontext: reset to the init state. */
		drop_init_fpu(tsk);
		return 0;
	}

	if (!access_ok(VERIFY_READ, buf, size))
		return -EACCES;

	/* Allocate/initialize the fpu state if this task never used math. */
	if (!used_math() && init_fpu(tsk))
		return -1;

	/* No FPU hardware: hand the frame to the math emulator. */
	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_set(current, NULL,
				       0, sizeof(struct user_i387_ia32_struct),
				       NULL, buf) != 0;

	if (use_xsave()) {
		struct _fpx_sw_bytes fx_sw_user;
		if (unlikely(check_for_xstate(buf_fx, buf_fx, &fx_sw_user))) {
			/*
			 * Couldn't find the extended state information in the
			 * memory layout. Restore just the FP/SSE and init all
			 * the other extended state.
			 */
			state_size = sizeof(struct i387_fxsave_struct);
			fx_only = 1;
		} else {
			state_size = fx_sw_user.xstate_size;
			xstate_bv = fx_sw_user.xstate_bv;
		}
	}

	if (ia32_fxstate) {
		/*
		 * For 32-bit frames with fxstate, copy the user state to the
		 * thread's fpu state, reconstruct fxstate from the fsave
		 * header. Sanitize the copied state etc.
		 */
		struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave;
		struct user_i387_ia32_struct env;
		int err = 0;

		/*
		 * Drop the current fpu which clears used_math(). This ensures
		 * that any context-switch during the copy of the new state,
		 * avoids the intermediate state from getting restored/saved.
		 * Thus avoiding the new restored state from getting corrupted.
		 * We will be ready to restore/save the state only after
		 * set_used_math() is again set.
		 */
		drop_fpu(tsk);

		if (__copy_from_user(xsave, buf_fx, state_size) ||
		    __copy_from_user(&env, buf, sizeof(env))) {
			err = -1;
		} else {
			sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only);
			set_used_math();
		}

		/* Eager mode keeps the registers live: reload them now. */
		if (use_eager_fpu()) {
			preempt_disable();
			math_state_restore();
			preempt_enable();
		}

		return err;
	} else {
		/*
		 * For 64-bit frames and 32-bit fsave frames, restore the user
		 * state to the registers directly (with exceptions handled).
		 */
		user_fpu_begin();
		if (restore_user_xstate(buf_fx, xstate_bv, fx_only)) {
			drop_init_fpu(tsk);
			return -1;
		}
	}

	return 0;
}

426 427 428 429 430 431 432
/*
 * Prepare the SW reserved portion of the fxsave memory layout, indicating
 * the presence of the extended state information in the memory layout
 * pointed by the fpstate pointer in the sigcontext.
 * This will be saved when ever the FP and extended state context is
 * saved on the user stack during the signal handler delivery to the user.
 */
R
roel kluin 已提交
433
static void prepare_fx_sw_frame(void)
434
{
435 436
	int fsave_header_size = sizeof(struct i387_fsave_struct);
	int size = xstate_size + FP_XSTATE_MAGIC2_SIZE;
437

438 439
	if (config_enabled(CONFIG_X86_32))
		size += fsave_header_size;
440 441

	fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
442
	fx_sw_reserved.extended_size = size;
443
	fx_sw_reserved.xstate_bv = pcntxt_mask;
444 445
	fx_sw_reserved.xstate_size = xstate_size;

446 447 448 449 450
	if (config_enabled(CONFIG_IA32_EMULATION)) {
		fx_sw_reserved_ia32 = fx_sw_reserved;
		fx_sw_reserved_ia32.extended_size += fsave_header_size;
	}
}
451

452 453 454
/*
 * Enable the extended processor state save/restore feature
 */
static inline void xstate_enable(void)
{
	/* Turn on OSXSAVE in CR4, then enable the supported features in XCR0. */
	cr4_set_bits(X86_CR4_OSXSAVE);
	xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);
}

461 462 463 464
/*
 * Record the offsets and sizes of different state managed by the xsave
 * memory layout.
 */
465
static void __init setup_xstate_features(void)
466 467 468 469 470 471 472 473
{
	int eax, ebx, ecx, edx, leaf = 0x2;

	xstate_features = fls64(pcntxt_mask);
	xstate_offsets = alloc_bootmem(xstate_features * sizeof(int));
	xstate_sizes = alloc_bootmem(xstate_features * sizeof(int));

	do {
474
		cpuid_count(XSTATE_CPUID, leaf, &eax, &ebx, &ecx, &edx);
475 476 477 478 479 480 481 482 483 484 485

		if (eax == 0)
			break;

		xstate_offsets[leaf] = ebx;
		xstate_sizes[leaf] = eax;

		leaf++;
	} while (1);
}

486 487 488 489 490 491 492 493 494 495
/*
 * This function sets up offsets and sizes of all extended states in
 * xsave area. This supports both standard format and compacted format
 * of the xsave area.
 *
 * Input: void
 * Output: void
 */
void setup_xstate_comp(void)
{
	unsigned int xstate_comp_sizes[sizeof(pcntxt_mask)*8];
	int i;

	/*
	 * The FP xstates and SSE xstates are legacy states. They are always
	 * in the fixed offsets in the xsave area in either compacted form
	 * or standard form.
	 */
	xstate_comp_offsets[0] = 0;
	xstate_comp_offsets[1] = offsetof(struct i387_fxsave_struct, xmm_space);

	if (!cpu_has_xsaves) {
		/* Standard format: the CPUID-reported offsets apply directly. */
		for (i = 2; i < xstate_features; i++) {
			if (test_bit(i, (unsigned long *)&pcntxt_mask)) {
				xstate_comp_offsets[i] = xstate_offsets[i];
				xstate_comp_sizes[i] = xstate_sizes[i];
			}
		}
		return;
	}

	/* Compacted format: states are packed right after the xsave header. */
	xstate_comp_offsets[2] = FXSAVE_SIZE + XSAVE_HDR_SIZE;

	for (i = 2; i < xstate_features; i++) {
		/* Disabled features occupy no space in the compacted layout. */
		if (test_bit(i, (unsigned long *)&pcntxt_mask))
			xstate_comp_sizes[i] = xstate_sizes[i];
		else
			xstate_comp_sizes[i] = 0;

		if (i > 2)
			xstate_comp_offsets[i] = xstate_comp_offsets[i-1]
					+ xstate_comp_sizes[i-1];

	}
}

532 533 534
/*
 * setup the xstate image representing the init state
 */
static void __init setup_init_fpu_buf(void)
{
	/*
	 * Setup init_xstate_buf to represent the init state of
	 * all the features managed by the xsave
	 */
	init_xstate_buf = alloc_bootmem_align(xstate_size,
					      __alignof__(struct xsave_struct));
	fx_finit(&init_xstate_buf->i387);

	if (!cpu_has_xsave)
		return;

	setup_xstate_features();

	if (cpu_has_xsaves) {
		/* Compacted format: bit 63 of xcomp_bv flags compaction. */
		init_xstate_buf->xsave_hdr.xcomp_bv =
						(u64)1 << 63 | pcntxt_mask;
		init_xstate_buf->xsave_hdr.xstate_bv = pcntxt_mask;
	}

	/*
	 * Init all the features state with header_bv being 0x0
	 */
	xrstor_state_booting(init_xstate_buf, -1);
	/*
	 * Dump the init state again. This is to identify the init state
	 * of any feature which is not represented by all zero's.
	 */
	xsave_state_booting(init_xstate_buf, -1);
}

567
static enum { AUTO, ENABLE, DISABLE } eagerfpu = AUTO;
568 569 570
static int __init eager_fpu_setup(char *s)
{
	if (!strcmp(s, "on"))
571
		eagerfpu = ENABLE;
572
	else if (!strcmp(s, "off"))
573 574 575
		eagerfpu = DISABLE;
	else if (!strcmp(s, "auto"))
		eagerfpu = AUTO;
576 577 578 579
	return 1;
}
__setup("eagerfpu=", eager_fpu_setup);

F
Fenghua Yu 已提交
580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603

/*
 * Calculate total size of enabled xstates in XCR0/pcntxt_mask.
 */
static void __init init_xstate_size(void)
{
	unsigned int eax, ebx, ecx, edx;
	int i;

	if (!cpu_has_xsaves) {
		/* Standard format: EBX of sub-leaf 0 reports the total size. */
		cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
		xstate_size = ebx;
		return;
	}

	/* Compacted format: legacy area + header + each enabled state. */
	xstate_size = FXSAVE_SIZE + XSAVE_HDR_SIZE;
	for (i = 2; i < 64; i++) {
		if (test_bit(i, (unsigned long *)&pcntxt_mask)) {
			cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);
			xstate_size += eax;
		}
	}
}

604 605 606
/*
 * Enable and initialize the xsave feature.
 */
static void __init xstate_enable_boot_cpu(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (boot_cpu_data.cpuid_level < XSTATE_CPUID) {
		WARN(1, KERN_ERR "XSTATE_CPUID missing\n");
		return;
	}

	/* EDX:EAX of sub-leaf 0 is the CPU's supported feature mask. */
	cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
	pcntxt_mask = eax + ((u64)edx << 32);

	if ((pcntxt_mask & XSTATE_FPSSE) != XSTATE_FPSSE) {
		pr_err("FP/SSE not shown under xsave features 0x%llx\n",
		       pcntxt_mask);
		BUG();
	}

	/*
	 * Support only the state known to OS.
	 */
	pcntxt_mask = pcntxt_mask & XCNTXT_MASK;

	xstate_enable();

	/*
	 * Recompute the context size for enabled features
	 */
	init_xstate_size();

	update_regset_xstate_info(xstate_size, pcntxt_mask);
	prepare_fx_sw_frame();
	setup_init_fpu_buf();

	/* Auto enable eagerfpu for xsaveopt */
	if (cpu_has_xsaveopt && eagerfpu != DISABLE)
		eagerfpu = ENABLE;

	/* Eager-only states cannot be used without eagerfpu. */
	if (pcntxt_mask & XSTATE_EAGER) {
		if (eagerfpu == DISABLE) {
			pr_err("eagerfpu not present, disabling some xstate features: 0x%llx\n",
					pcntxt_mask & XSTATE_EAGER);
			pcntxt_mask &= ~XSTATE_EAGER;
		} else {
			eagerfpu = ENABLE;
		}
	}

	pr_info("enabled xstate_bv 0x%llx, cntxt size 0x%x using %s\n",
		pcntxt_mask, xstate_size,
		cpu_has_xsaves ? "compacted form" : "standard form");
}
659

660 661 662 663 664 665 666
/*
 * For the very first instance, this calls xstate_enable_boot_cpu();
 * for all subsequent instances, this calls xstate_enable().
 *
 * This is somewhat obfuscated due to the lack of powerful enough
 * overrides for the section checks.
 */
void xsave_init(void)
{
	/* Starts as the boot-cpu setup; downgraded after the first call. */
	static __refdata void (*next_func)(void) = xstate_enable_boot_cpu;
	void (*this_func)(void);

	if (!cpu_has_xsave)
		return;

	this_func = next_func;
	next_func = xstate_enable;
	this_func();
}
679 680 681 682 683 684 685 686 687

/*
 * Boot-CPU part of eager_fpu_init(): allocate the boot task's fpu state
 * buffer and make sure the init xstate buffer has been set up.
 */
static inline void __init eager_fpu_init_bp(void)
{
	current->thread.fpu.state =
	    alloc_bootmem_align(xstate_size, __alignof__(struct xsave_struct));
	if (!init_xstate_buf)
		setup_init_fpu_buf();
}

688
/*
 * Per-CPU eager-FPU initialization; the boot CPU additionally runs
 * eager_fpu_init_bp() to allocate the init buffers.
 */
void eager_fpu_init(void)
{
	static __refdata void (*boot_func)(void) = eager_fpu_init_bp;

	clear_used_math();
	current_thread_info()->status = 0;

	if (eagerfpu == ENABLE)
		setup_force_cpu_cap(X86_FEATURE_EAGER_FPU);

	if (!cpu_has_eager_fpu) {
		/* Lazy mode: arm the device-not-available trap via CR0.TS. */
		stts();
		return;
	}

	if (boot_func) {
		boot_func();
		boot_func = NULL;
	}

	/*
	 * This is same as math_state_restore(). But use_xsave() is
	 * not yet patched to use math_state_restore().
	 */
	init_fpu(current);
	__thread_fpu_begin(current);
	if (cpu_has_xsave)
		xrstor_state(init_xstate_buf, -1);
	else
		fxrstor_checking(&init_xstate_buf->i387);
}
719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741

/*
 * get_xsave_addr - locate one state component inside an xsave area
 * @xsave:  base address of the xsave area
 * @xstate: state bit as defined in xsave.h (e.g. XSTATE_FP, XSTATE_SSE)
 *
 * Works for both the standard and the compacted xsave format, since
 * xstate_comp_offsets[] is populated for whichever layout is in use.
 *
 * Returns the address of the requested state inside @xsave, or NULL
 * when the feature is not enabled in pcntxt_mask.
 */
void *get_xsave_addr(struct xsave_struct *xsave, int xstate)
{
	int nr = fls64(xstate) - 1;

	if (!test_bit(nr, (unsigned long *)&pcntxt_mask))
		return NULL;

	return (void *)xsave + xstate_comp_offsets[nr];
}
EXPORT_SYMBOL_GPL(get_xsave_addr);