/*
 * xsave/xrstor support.
 *
 * Author: Suresh Siddha <suresh.b.siddha@intel.com>
 */
#include <linux/compat.h>
#include <linux/cpu.h>

#include <asm/fpu/api.h>
#include <asm/fpu/internal.h>
#include <asm/sigframe.h>
#include <asm/tlbflush.h>
#include <asm/xcr.h>

/*
 * Mask of xstate features supported by the CPU and the kernel:
 */
u64 xfeatures_mask;

/*
 * Represents init state for the supported extended state.
 */
struct xsave_struct init_xstate_ctx;

static struct _fpx_sw_bytes fx_sw_reserved, fx_sw_reserved_ia32;
static unsigned int xstate_offsets[XFEATURES_NR_MAX], xstate_sizes[XFEATURES_NR_MAX];
static unsigned int xstate_comp_offsets[sizeof(xfeatures_mask)*8];

/* The number of supported xfeatures in xfeatures_mask: */
static unsigned int xfeatures_nr;

/*
 * When executing XSAVEOPT (optimized XSAVE), if a processor implementation
 * detects that an FPU state component is still (or is again) in its
 * initialized state, it may clear the corresponding bit in the header.xfeatures
 * field, and can skip the writeout of registers to the corresponding memory layout.
 *
 * This means that when the bit is zero, the state component might still contain
 * some previous - non-initialized register state.
 *
 * Before writing xstate information to user-space we sanitize those components,
 * to always ensure that the memory layout of a feature will be in the init state
 * if the corresponding header bit is zero. This is to ensure that user-space doesn't
 * see some stale state in the memory layout during signal handling, debugging etc.
 */
void __sanitize_i387_state(struct task_struct *tsk)
{
	struct i387_fxsave_struct *fx = &tsk->thread.fpu.state.fxsave;
	int feature_bit;
	u64 xfeatures;

	if (!fx)
		return;

	xfeatures = tsk->thread.fpu.state.xsave.header.xfeatures;

	/*
	 * None of the feature bits are in init state. So nothing else
	 * to do for us, as the memory layout is up to date.
	 */
	if ((xfeatures & xfeatures_mask) == xfeatures_mask)
		return;

	/*
	 * FP is in init state: write out the x87 init values
	 * (0x37f is the default x87 control word).
	 */
	if (!(xfeatures & XSTATE_FP)) {
		fx->cwd = 0x37f;
		fx->swd = 0;
		fx->twd = 0;
		fx->fop = 0;
		fx->rip = 0;
		fx->rdp = 0;
		memset(&fx->st_space[0], 0, 128);
	}

	/*
	 * SSE is in init state: clear the XMM register area.
	 */
	if (!(xfeatures & XSTATE_SSE))
		memset(&fx->xmm_space[0], 0, 256);

	/*
	 * First two features are FPU and SSE, which above we handled
	 * in a special way already:
	 */
	feature_bit = 0x2;
	/* Bits of enabled features that the hardware left in init state: */
	xfeatures = (xfeatures_mask & ~xfeatures) >> 2;

	/*
	 * Update all the remaining memory layouts according to their
	 * standard xstate layout, if their header bit is in the init
	 * state:
	 */
	while (xfeatures) {
		if (xfeatures & 0x1) {
			int offset = xstate_offsets[feature_bit];
			int size = xstate_sizes[feature_bit];

			/* Copy this feature's init image over the stale data: */
			memcpy((void *)fx + offset,
			       (void *)&init_xstate_ctx + offset,
			       size);
		}

		xfeatures >>= 1;
		feature_bit++;
	}
}

/*
 * Check for the presence of extended state information in the
 * user fpstate pointer in the sigcontext.
 *
 * Returns 0 when a valid extended-state frame is present, -1 on any
 * copy fault or validation failure.
 */
static inline int check_for_xstate(struct i387_fxsave_struct __user *buf,
				   void __user *fpstate,
				   struct _fpx_sw_bytes *fx_sw)
{
	int min_xstate_size = sizeof(struct i387_fxsave_struct) +
			      sizeof(struct xstate_header);
	unsigned int magic2;

	if (__copy_from_user(fx_sw, &buf->sw_reserved[0], sizeof(*fx_sw)))
		return -1;

	/* Check for the first magic field and other error scenarios. */
	if (fx_sw->magic1 != FP_XSTATE_MAGIC1 ||
	    fx_sw->xstate_size < min_xstate_size ||
	    fx_sw->xstate_size > xstate_size ||
	    fx_sw->xstate_size > fx_sw->extended_size)
		return -1;

	/*
	 * Check for the presence of second magic word at the end of memory
	 * layout. This detects the case where the user just copied the legacy
	 * fpstate layout with out copying the extended state information
	 * in the memory layout.
	 */
	if (__get_user(magic2, (__u32 __user *)(fpstate + fx_sw->xstate_size))
	    || magic2 != FP_XSTATE_MAGIC2)
		return -1;

	return 0;
}

/*
 * Signal frame handlers.
 */

/*
 * Write the legacy fsave-format header of a 32-bit signal frame at 'buf'.
 * Returns 0 on success, -1 when a user-space copy faults.
 */
static inline int save_fsave_header(struct task_struct *tsk, void __user *buf)
{
	if (use_fxsr()) {
		struct xsave_struct *xsave = &tsk->thread.fpu.state.xsave;
		struct user_i387_ia32_struct env;
		struct _fpstate_ia32 __user *fp = buf;

		/* Convert the fxsave image to the legacy i387 layout: */
		convert_from_fxsr(&env, tsk);

		if (__copy_to_user(buf, &env, sizeof(env)) ||
		    __put_user(xsave->i387.swd, &fp->status) ||
		    __put_user(X86_FXSR_MAGIC, &fp->magic))
			return -1;
	} else {
		struct i387_fsave_struct __user *fp = buf;
		u32 swd;
		/* Mirror the saved status word into the frame's 'status' field: */
		if (__get_user(swd, &fp->swd) || __put_user(swd, &fp->status))
			return -1;
	}

	return 0;
}

/*
 * Finish the [f]xsave signal frame for user-space: fill in the SW-reserved
 * bytes and, for xsave frames, the trailing magic word plus the updated
 * xfeatures bit vector. Returns 0 on success, non-zero on a copy fault.
 */
static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
{
	struct xsave_struct __user *x = buf;
	struct _fpx_sw_bytes *sw_bytes;
	u32 xfeatures;
	int err;

	/* Setup the bytes not touched by the [f]xsave and reserved for SW. */
	sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
	err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));

	if (!use_xsave())
		return err;

	/* Place the second magic word right after the xstate image: */
	err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));

	/*
	 * Read the xfeatures which we copied (directly from the cpu or
	 * from the state in task struct) to the user buffers.
	 */
	err |= __get_user(xfeatures, (__u32 *)&x->header.xfeatures);

	/*
	 * For legacy compatible, we always set FP/SSE bits in the bit
	 * vector while saving the state to the user context. This will
	 * enable us capturing any changes(during sigreturn) to
	 * the FP/SSE bits by the legacy applications which don't touch
	 * xfeatures in the xsave header.
	 *
	 * xsave aware apps can change the xfeatures in the xsave
	 * header as well as change any contents in the memory layout.
	 * xrestore as part of sigreturn will capture all the changes.
	 */
	xfeatures |= XSTATE_FPSSE;

	err |= __put_user(xfeatures, (__u32 *)&x->header.xfeatures);

	return err;
}

/*
 * Dump the live FPU register state into the user-space buffer 'buf',
 * using the richest save instruction the CPU supports.
 *
 * If the save faults part-way, the whole buffer is cleared so user-space
 * never sees a half-written frame. Returns 0 on success.
 */
static inline int save_user_xstate(struct xsave_struct __user *buf)
{
	int ret;

	if (use_xsave())
		ret = xsave_user(buf);
	else if (use_fxsr())
		ret = fxsave_user((struct i387_fxsave_struct __user *) buf);
	else
		ret = fsave_user((struct i387_fsave_struct __user *) buf);

	if (unlikely(ret) && __clear_user(buf, xstate_size))
		ret = -EFAULT;

	return ret;
}

/*
 * Save the fpu, extended register state to the user signal frame.
 *
 * 'buf_fx' is the 64-byte aligned pointer at which the [f|fx|x]save
 *  state is copied.
 *  'buf' points to the 'buf_fx' or to the fsave header followed by 'buf_fx'.
 *
 *	buf == buf_fx for 64-bit frames and 32-bit fsave frame.
 *	buf != buf_fx for 32-bit frames with fxstate.
 *
 * If the fpu, extended register state is live, save the state directly
 * to the user frame pointed by the aligned pointer 'buf_fx'. Otherwise,
 * copy the thread's fpu state to the user frame starting at 'buf_fx'.
 *
 * If this is a 32-bit frame with fxstate, put a fsave header before
 * the aligned state at 'buf_fx'.
 *
 * For [f]xsave state, update the SW reserved fields in the [f]xsave frame
 * indicating the absence/presence of the extended state to the user.
 */
int save_xstate_sig(void __user *buf, void __user *buf_fx, int size)
{
	struct xsave_struct *xsave = &current->thread.fpu.state.xsave;
	struct task_struct *tsk = current;
	int ia32_fxstate = (buf != buf_fx);

	/* Split frames (buf != buf_fx) only exist for 32-bit/compat tasks: */
	ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
			 config_enabled(CONFIG_IA32_EMULATION));

	if (!access_ok(VERIFY_WRITE, buf, size))
		return -EACCES;

	/* No FPU hardware: hand the frame to the soft-FPU emulation code. */
	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_get(current, NULL, 0,
			sizeof(struct user_i387_ia32_struct), NULL,
			(struct _fpstate_ia32 __user *) buf) ? -1 : 1;

	if (user_has_fpu()) {
		/* Save the live register state to the user directly. */
		if (save_user_xstate(buf_fx))
			return -1;
		/* Update the thread's fxstate to save the fsave header. */
		if (ia32_fxstate)
			fpu_fxsave(&tsk->thread.fpu);
	} else {
		/* Registers not live: copy the thread's saved state instead. */
		sanitize_i387_state(tsk);
		if (__copy_to_user(buf_fx, xsave, xstate_size))
			return -1;
	}

	/* Save the fsave header for the 32-bit frames. */
	if ((ia32_fxstate || !use_fxsr()) && save_fsave_header(tsk, buf))
		return -1;

	/* Fill in the SW-reserved bytes and the trailing MAGIC2 word: */
	if (use_fxsr() && save_xstate_epilog(buf_fx, ia32_fxstate))
		return -1;

	return 0;
}
285

/*
 * Bring an xstate image copied in from a signal frame into a consistent,
 * safe shape before it can be restored into the registers.
 */
static inline void
sanitize_restored_xstate(struct task_struct *tsk,
			 struct user_i387_ia32_struct *ia32_env,
			 u64 xfeatures, int fx_only)
{
	struct xsave_struct *xsave = &tsk->thread.fpu.state.xsave;
	struct xstate_header *header = &xsave->header;

	if (use_xsave()) {
		/* These bits must be zero. */
		memset(header->reserved, 0, 48);

		/*
		 * Init the state that is not present in the memory
		 * layout and not enabled by the OS.
		 */
		if (fx_only)
			header->xfeatures = XSTATE_FPSSE;
		else
			header->xfeatures &= (xfeatures_mask & xfeatures);
	}

	if (use_fxsr()) {
		/*
		 * mscsr reserved bits must be masked to zero for security
		 * reasons.
		 */
		xsave->i387.mxcsr &= mxcsr_feature_mask;

		/* Merge the fsave header back into the fxsave image: */
		convert_to_fxsr(tsk, ia32_env);
	}
}

/*
 * Restore the extended state if present. Otherwise, restore the FP/SSE state.
 */
static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
{
	if (use_xsave()) {
		/*
		 * A misaligned buffer (XRSTOR needs 64-byte alignment) or an
		 * fx-only frame is restored via FXRSTOR, after putting all
		 * non-FP/SSE states back into their init state.
		 */
		if ((unsigned long)buf % 64 || fx_only) {
			u64 init_bv = xfeatures_mask & ~XSTATE_FPSSE;
			xrstor_state(&init_xstate_ctx, init_bv);
			return fxrstor_user(buf);
		} else {
			/* Init only the features the frame does not carry: */
			u64 init_bv = xfeatures_mask & ~xbv;
			if (unlikely(init_bv))
				xrstor_state(&init_xstate_ctx, init_bv);
			return xrestore_user(buf, xbv);
		}
	} else if (use_fxsr()) {
		return fxrstor_user(buf);
	} else
		return frstor_user(buf);
}

341
int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
342
{
343
	int ia32_fxstate = (buf != buf_fx);
344
	struct task_struct *tsk = current;
345
	struct fpu *fpu = &tsk->thread.fpu;
346
	int state_size = xstate_size;
347
	u64 xfeatures = 0;
348 349 350 351
	int fx_only = 0;

	ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
			 config_enabled(CONFIG_IA32_EMULATION));
352 353

	if (!buf) {
354
		fpu_reset_state(fpu);
355
		return 0;
356 357 358 359 360
	}

	if (!access_ok(VERIFY_READ, buf, size))
		return -EACCES;

361
	fpu__activate_curr(fpu);
362

363
	if (!static_cpu_has(X86_FEATURE_FPU))
364 365 366
		return fpregs_soft_set(current, NULL,
				       0, sizeof(struct user_i387_ia32_struct),
				       NULL, buf) != 0;
367

368 369 370 371 372 373 374 375 376 377 378 379
	if (use_xsave()) {
		struct _fpx_sw_bytes fx_sw_user;
		if (unlikely(check_for_xstate(buf_fx, buf_fx, &fx_sw_user))) {
			/*
			 * Couldn't find the extended state information in the
			 * memory layout. Restore just the FP/SSE and init all
			 * the other extended state.
			 */
			state_size = sizeof(struct i387_fxsave_struct);
			fx_only = 1;
		} else {
			state_size = fx_sw_user.xstate_size;
380
			xfeatures = fx_sw_user.xfeatures;
381 382 383 384 385 386 387 388 389
		}
	}

	if (ia32_fxstate) {
		/*
		 * For 32-bit frames with fxstate, copy the user state to the
		 * thread's fpu state, reconstruct fxstate from the fsave
		 * header. Sanitize the copied state etc.
		 */
390
		struct fpu *fpu = &tsk->thread.fpu;
391
		struct user_i387_ia32_struct env;
392
		int err = 0;
393

394
		/*
395
		 * Drop the current fpu which clears fpu->fpstate_active. This ensures
396 397 398 399
		 * that any context-switch during the copy of the new state,
		 * avoids the intermediate state from getting restored/saved.
		 * Thus avoiding the new restored state from getting corrupted.
		 * We will be ready to restore/save the state only after
400
		 * fpu->fpstate_active is again set.
401
		 */
402
		drop_fpu(fpu);
403

404
		if (__copy_from_user(&fpu->state.xsave, buf_fx, state_size) ||
405
		    __copy_from_user(&env, buf, sizeof(env))) {
406
			fpstate_init(fpu);
407 408
			err = -1;
		} else {
409
			sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
410
		}
411

412
		fpu->fpstate_active = 1;
413 414
		if (use_eager_fpu()) {
			preempt_disable();
415
			fpu__restore();
416 417
			preempt_enable();
		}
418 419

		return err;
420
	} else {
421
		/*
422 423
		 * For 64-bit frames and 32-bit fsave frames, restore the user
		 * state to the registers directly (with exceptions handled).
424
		 */
425
		user_fpu_begin();
426
		if (restore_user_xstate(buf_fx, xfeatures, fx_only)) {
427
			fpu_reset_state(fpu);
428 429
			return -1;
		}
430
	}
431 432

	return 0;
433 434
}

435 436 437 438 439 440 441
/*
 * Prepare the SW reserved portion of the fxsave memory layout, indicating
 * the presence of the extended state information in the memory layout
 * pointed by the fpstate pointer in the sigcontext.
 * This will be saved when ever the FP and extended state context is
 * saved on the user stack during the signal handler delivery to the user.
 */
R
roel kluin 已提交
442
static void prepare_fx_sw_frame(void)
443
{
444 445
	int fsave_header_size = sizeof(struct i387_fsave_struct);
	int size = xstate_size + FP_XSTATE_MAGIC2_SIZE;
446

447 448
	if (config_enabled(CONFIG_X86_32))
		size += fsave_header_size;
449 450

	fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
451
	fx_sw_reserved.extended_size = size;
452
	fx_sw_reserved.xfeatures = xfeatures_mask;
453 454
	fx_sw_reserved.xstate_size = xstate_size;

455 456 457 458 459
	if (config_enabled(CONFIG_IA32_EMULATION)) {
		fx_sw_reserved_ia32 = fx_sw_reserved;
		fx_sw_reserved_ia32.extended_size += fsave_header_size;
	}
}
460

461
/*
462 463
 * Enable the extended processor state save/restore feature.
 * Called once per CPU onlining.
464
 */
465
void fpu__init_cpu_xstate(void)
466
{
467
	if (!cpu_has_xsave || !xfeatures_mask)
468 469
		return;

A
Andy Lutomirski 已提交
470
	cr4_set_bits(X86_CR4_OSXSAVE);
471
	xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask);
472 473
}

474 475 476 477
/*
 * Record the offsets and sizes of different state managed by the xsave
 * memory layout.
 */
478
static void __init setup_xstate_features(void)
479 480 481
{
	int eax, ebx, ecx, edx, leaf = 0x2;

482
	xfeatures_nr = fls64(xfeatures_mask);
483 484

	do {
485
		cpuid_count(XSTATE_CPUID, leaf, &eax, &ebx, &ecx, &edx);
486 487 488 489 490 491 492 493 494 495 496

		if (eax == 0)
			break;

		xstate_offsets[leaf] = ebx;
		xstate_sizes[leaf] = eax;

		leaf++;
	} while (1);
}

497 498
static void print_xstate_feature(u64 xstate_mask, const char *desc)
{
499
	if (xfeatures_mask & xstate_mask) {
500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520
		int xstate_feature = fls64(xstate_mask)-1;

		pr_info("x86/fpu: Supporting XSAVE feature %2d: '%s'\n", xstate_feature, desc);
	}
}

/*
 * Print out all the supported xstate features:
 */
static void print_xstate_features(void)
{
	print_xstate_feature(XSTATE_FP,		"x87 floating point registers");
	print_xstate_feature(XSTATE_SSE,	"SSE registers");
	print_xstate_feature(XSTATE_YMM,	"AVX registers");
	print_xstate_feature(XSTATE_BNDREGS,	"MPX bounds registers");
	print_xstate_feature(XSTATE_BNDCSR,	"MPX CSR");
	print_xstate_feature(XSTATE_OPMASK,	"AVX-512 opmask");
	print_xstate_feature(XSTATE_ZMM_Hi256,	"AVX-512 Hi256");
	print_xstate_feature(XSTATE_Hi16_ZMM,	"AVX-512 ZMM_Hi256");
}

/*
 * This function sets up offsets and sizes of all extended states in
 * xsave area. This supports both standard format and compacted format
 * of the xsave aread.
 *
 * Input: void
 * Output: void
 */
void setup_xstate_comp(void)
{
	unsigned int xstate_comp_sizes[sizeof(xfeatures_mask)*8];
	int i;

	/*
	 * The FP xstates and SSE xstates are legacy states. They are always
	 * in the fixed offsets in the xsave area in either compacted form
	 * or standard form.
	 */
	xstate_comp_offsets[0] = 0;
	xstate_comp_offsets[1] = offsetof(struct i387_fxsave_struct, xmm_space);

	if (!cpu_has_xsaves) {
		/* Standard format: reuse the CPUID-enumerated offsets as-is. */
		for (i = 2; i < xfeatures_nr; i++) {
			if (test_bit(i, (unsigned long *)&xfeatures_mask)) {
				xstate_comp_offsets[i] = xstate_offsets[i];
				xstate_comp_sizes[i] = xstate_sizes[i];
			}
		}
		return;
	}

	/* Compacted format: states are packed right after the xsave header. */
	xstate_comp_offsets[2] = FXSAVE_SIZE + XSAVE_HDR_SIZE;

	for (i = 2; i < xfeatures_nr; i++) {
		/* Disabled features occupy no space in the compacted layout: */
		if (test_bit(i, (unsigned long *)&xfeatures_mask))
			xstate_comp_sizes[i] = xstate_sizes[i];
		else
			xstate_comp_sizes[i] = 0;

		/* Each state starts immediately after the previous one: */
		if (i > 2)
			xstate_comp_offsets[i] = xstate_comp_offsets[i-1]
					+ xstate_comp_sizes[i-1];

	}
}

/*
 * setup the xstate image representing the init state
 */
static void setup_init_fpu_buf(void)
{
	/* Global setup - run it only once, from the boot CPU: */
	static int on_boot_cpu = 1;

	if (!on_boot_cpu)
		return;
	on_boot_cpu = 0;

	if (!cpu_has_xsave)
		return;

	setup_xstate_features();
	print_xstate_features();

	if (cpu_has_xsaves) {
		/* Compacted format: set the compaction bit (bit 63) + mask. */
		init_xstate_ctx.header.xcomp_bv = (u64)1 << 63 | xfeatures_mask;
		init_xstate_ctx.header.xfeatures = xfeatures_mask;
	}

	/*
	 * Init all the features state with header_bv being 0x0
	 */
	xrstor_state_booting(&init_xstate_ctx, -1);

	/*
	 * Dump the init state again. This is to identify the init state
	 * of any feature which is not represented by all zero's.
	 */
	xsave_state_booting(&init_xstate_ctx);
}

F
Fenghua Yu 已提交
601
/*
602
 * Calculate total size of enabled xstates in XCR0/xfeatures_mask.
F
Fenghua Yu 已提交
603 604 605 606 607 608 609 610 611 612 613 614 615 616
 */
static void __init init_xstate_size(void)
{
	unsigned int eax, ebx, ecx, edx;
	int i;

	if (!cpu_has_xsaves) {
		cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
		xstate_size = ebx;
		return;
	}

	xstate_size = FXSAVE_SIZE + XSAVE_HDR_SIZE;
	for (i = 2; i < 64; i++) {
617
		if (test_bit(i, (unsigned long *)&xfeatures_mask)) {
F
Fenghua Yu 已提交
618 619 620 621 622 623
			cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);
			xstate_size += eax;
		}
	}
}

624 625
/*
 * Enable and initialize the xsave feature.
626
 * Called once per system bootup.
627
 *
I
Ingo Molnar 已提交
628
 * ( Not marked __init because of false positive section warnings. )
629
 */
630
void fpu__init_system_xstate(void)
631 632
{
	unsigned int eax, ebx, ecx, edx;
633 634 635 636 637
	static bool on_boot_cpu = 1;

	if (!on_boot_cpu)
		return;
	on_boot_cpu = 0;
638

639 640 641 642 643
	if (!cpu_has_xsave) {
		pr_info("x86/fpu: Legacy x87 FPU detected.\n");
		return;
	}

644
	if (boot_cpu_data.cpuid_level < XSTATE_CPUID) {
645
		WARN(1, "x86/fpu: XSTATE_CPUID missing!\n");
646 647 648 649
		return;
	}

	cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
650
	xfeatures_mask = eax + ((u64)edx << 32);
651

652 653
	if ((xfeatures_mask & XSTATE_FPSSE) != XSTATE_FPSSE) {
		pr_err("x86/fpu: FP/SSE not present amongst the CPU's xstate features: 0x%llx.\n", xfeatures_mask);
654 655 656 657
		BUG();
	}

	/*
658
	 * Support only the state known to OS.
659
	 */
660
	xfeatures_mask = xfeatures_mask & XCNTXT_MASK;
661

662 663
	/* Enable xstate instructions to be able to continue with initialization: */
	fpu__init_cpu_xstate();
664 665 666 667

	/*
	 * Recompute the context size for enabled features
	 */
F
Fenghua Yu 已提交
668
	init_xstate_size();
669

670
	update_regset_xstate_info(xstate_size, xfeatures_mask);
671
	prepare_fx_sw_frame();
672
	setup_init_fpu_buf();
673

674
	pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is 0x%x bytes, using '%s' format.\n",
675
		xfeatures_mask,
676 677
		xstate_size,
		cpu_has_xsaves ? "compacted" : "standard");
678
}
679

680 681 682 683 684 685 686 687 688 689 690 691
/*
 * Restore minimal FPU state after suspend:
 */
void fpu__resume_cpu(void)
{
	/*
	 * Restore XCR0 on xsave capable CPUs:
	 */
	if (cpu_has_xsave)
		xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask);
}

692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708
/*
 * Given the xsave area and a state inside, this function returns the
 * address of the state.
 *
 * This is the API that is called to get xstate address in either
 * standard format or compacted format of xsave area.
 *
 * Inputs:
 *	xsave: base address of the xsave area;
 *	xstate: state which is defined in xsave.h (e.g. XSTATE_FP, XSTATE_SSE,
 *	etc.)
 * Output:
 *	address of the state in the xsave area.
 */
void *get_xsave_addr(struct xsave_struct *xsave, int xstate)
{
	int feature = fls64(xstate) - 1;
709
	if (!test_bit(feature, (unsigned long *)&xfeatures_mask))
710 711 712 713
		return NULL;

	return (void *)xsave + xstate_comp_offsets[feature];
}
P
Paolo Bonzini 已提交
714
EXPORT_SYMBOL_GPL(get_xsave_addr);