xstate.c 20.3 KB
Newer Older
1 2 3 4 5 6
/*
 * xsave/xrstor support.
 *
 * Author: Suresh Siddha <suresh.b.siddha@intel.com>
 */
#include <linux/compat.h>
F
Fenghua Yu 已提交
7
#include <linux/cpu.h>
8

9
#include <asm/fpu/api.h>
10
#include <asm/fpu/internal.h>
11
#include <asm/fpu/signal.h>
12
#include <asm/fpu/regset.h>
13
#include <asm/sigframe.h>
A
Andy Lutomirski 已提交
14
#include <asm/tlbflush.h>
15

16 17 18 19 20 21 22 23 24 25 26 27 28
/*
 * Human-readable names of the xfeature components, indexed by xfeature
 * bit number. The final "unknown" entry is the fallback for any bit
 * beyond the known ones (cpu_has_xfeatures() clamps its index to it).
 */
static const char *xfeature_names[] =
{
	"x87 floating point registers"	,
	"SSE registers"			,
	"AVX registers"			,
	"MPX bounds registers"		,
	"MPX CSR"			,
	"AVX-512 opmask"		,
	"AVX-512 Hi256"			,
	"AVX-512 ZMM_Hi256"		,
	"unknown xstate feature"	,
};

29
/*
 * Mask of xstate features supported by the CPU and the kernel:
 */
u64 xfeatures_mask __read_mostly;

/* SW-reserved [f]xsave bytes, for 64-bit and for ia32 signal frames: */
static struct _fpx_sw_bytes fx_sw_reserved, fx_sw_reserved_ia32;
/* Per-xfeature offset/size within the standard (uncompacted) XSAVE layout: */
static unsigned int xstate_offsets[XFEATURES_NR_MAX], xstate_sizes[XFEATURES_NR_MAX];
/* Per-xfeature offsets for the compacted (XSAVES) layout: */
static unsigned int xstate_comp_offsets[sizeof(xfeatures_mask)*8];

/* The number of supported xfeatures in xfeatures_mask: */
static unsigned int xfeatures_nr;
40

41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78
/*
 * Return whether the system supports a given xfeature.
 *
 * Also return the name of the (most advanced) feature that the caller requested:
 */
int cpu_has_xfeatures(u64 xfeatures_needed, const char **feature_name)
{
	u64 missing = xfeatures_needed & ~xfeatures_mask;

	if (unlikely(feature_name)) {
		/*
		 * Report the most advanced (highest) feature bit involved:
		 * if something is missing, name the highest missing bit,
		 * otherwise name the highest requested bit. So if a driver
		 * asks about "XSTATE_SSE | XSTATE_YMM" we'll print the
		 * missing AVX feature - this is the most informative
		 * message to users.
		 */
		u64 report_mask = missing ? missing : xfeatures_needed;
		long idx = fls64(report_mask) - 1;
		long last = ARRAY_SIZE(xfeature_names) - 1;

		/* Clamp unknown bits to the "unknown xstate feature" entry: */
		if (idx > last)
			idx = last;

		*feature_name = xfeature_names[idx];
	}

	return missing ? 0 : 1;
}
EXPORT_SYMBOL_GPL(cpu_has_xfeatures);

79
/*
 * When executing XSAVEOPT (optimized XSAVE), if a processor implementation
 * detects that an FPU state component is still (or is again) in its
 * initialized state, it may clear the corresponding bit in the header.xfeatures
 * field, and can skip the writeout of registers to the corresponding memory layout.
 *
 * This means that when the bit is zero, the state component might still contain
 * some previous - non-initialized register state.
 *
 * Before writing xstate information to user-space we sanitize those components,
 * to always ensure that the memory layout of a feature will be in the init state
 * if the corresponding header bit is zero. This is to ensure that user-space doesn't
 * see some stale state in the memory layout during signal handling, debugging etc.
 */
void fpstate_sanitize_xstate(struct fpu *fpu)
{
	struct i387_fxsave_struct *fx = &fpu->state.fxsave;
	int feature_bit;
	u64 xfeatures;

	/* Only XSAVEOPT can leave init-state components stale in memory: */
	if (!use_xsaveopt())
		return;

	xfeatures = fpu->state.xsave.header.xfeatures;

	/*
	 * None of the feature bits are in init state. So nothing else
	 * to do for us, as the memory layout is up to date.
	 */
	if ((xfeatures & xfeatures_mask) == xfeatures_mask)
		return;

	/*
	 * FP is in init state
	 */
	if (!(xfeatures & XSTATE_FP)) {
		fx->cwd = 0x37f;	/* 0x37f: default x87 control word */
		fx->swd = 0;
		fx->twd = 0;
		fx->fop = 0;
		fx->rip = 0;
		fx->rdp = 0;
		memset(&fx->st_space[0], 0, 128);
	}

	/*
	 * SSE is in init state
	 */
	if (!(xfeatures & XSTATE_SSE))
		memset(&fx->xmm_space[0], 0, 256);

	/*
	 * First two features are FPU and SSE, which above we handled
	 * in a special way already:
	 */
	feature_bit = 0x2;
	/* Bitmap of enabled features whose header bit is in the init state: */
	xfeatures = (xfeatures_mask & ~xfeatures) >> 2;

	/*
	 * Update all the remaining memory layouts according to their
	 * standard xstate layout, if their header bit is in the init
	 * state:
	 */
	while (xfeatures) {
		if (xfeatures & 0x1) {
			int offset = xstate_offsets[feature_bit];
			int size = xstate_sizes[feature_bit];

			/* Copy this feature's init-state image over the stale data: */
			memcpy((void *)fx + offset,
			       (void *)&init_fpstate.xsave + offset,
			       size);
		}

		xfeatures >>= 1;
		feature_bit++;
	}
}

157 158 159 160
/*
 * Check for the presence of extended state information in the
 * user fpstate pointer in the sigcontext.
 *
 * On success, fills in *fx_sw from the frame's sw_reserved bytes and
 * returns 0; returns -1 if the frame carries no valid extended state.
 */
static inline int check_for_xstate(struct i387_fxsave_struct __user *buf,
				   void __user *fpstate,
				   struct _fpx_sw_bytes *fx_sw)
{
	int min_xstate_size = sizeof(struct i387_fxsave_struct) +
			      sizeof(struct xstate_header);
	unsigned int magic2;

	if (__copy_from_user(fx_sw, &buf->sw_reserved[0], sizeof(*fx_sw)))
		return -1;

	/* Check for the first magic field and other error scenarios. */
	if (fx_sw->magic1 != FP_XSTATE_MAGIC1 ||
	    fx_sw->xstate_size < min_xstate_size ||
	    fx_sw->xstate_size > xstate_size ||
	    fx_sw->xstate_size > fx_sw->extended_size)
		return -1;

	/*
	 * Check for the presence of second magic word at the end of memory
	 * layout. This detects the case where the user just copied the legacy
	 * fpstate layout without copying the extended state information
	 * in the memory layout.
	 */
	if (__get_user(magic2, (__u32 __user *)(fpstate + fx_sw->xstate_size))
	    || magic2 != FP_XSTATE_MAGIC2)
		return -1;

	return 0;
}

192 193 194
/*
 * Signal frame handlers.
 */

/*
 * Fill in the legacy fsave header of a 32-bit signal frame at 'buf'.
 * Returns 0 on success, -1 on a failed user-space access.
 */
static inline int save_fsave_header(struct task_struct *tsk, void __user *buf)
{
	if (use_fxsr()) {
		struct xsave_struct *xsave = &tsk->thread.fpu.state.xsave;
		struct user_i387_ia32_struct env;
		struct _fpstate_ia32 __user *fp = buf;

		/* Reconstruct the legacy i387 env from the fxsave image: */
		convert_from_fxsr(&env, tsk);

		if (__copy_to_user(buf, &env, sizeof(env)) ||
		    __put_user(xsave->i387.swd, &fp->status) ||
		    __put_user(X86_FXSR_MAGIC, &fp->magic))
			return -1;
	} else {
		struct i387_fsave_struct __user *fp = buf;
		u32 swd;
		/* Mirror the already-saved status word into fp->status: */
		if (__get_user(swd, &fp->swd) || __put_user(swd, &fp->status))
			return -1;
	}

	return 0;
}

/*
 * Finalize the [f]xsave signal frame at 'buf': fill the SW-reserved
 * bytes, and for xsave frames also write the trailing FP_XSTATE_MAGIC2
 * word and force the FP/SSE bits on in the saved xfeatures.
 * Returns 0 on success, nonzero on a failed user access.
 */
static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
{
	struct xsave_struct __user *x = buf;
	struct _fpx_sw_bytes *sw_bytes;
	u32 xfeatures;
	int err;

	/* Setup the bytes not touched by the [f]xsave and reserved for SW. */
	sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
	err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));

	if (!use_xsave())
		return err;

	err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));

	/*
	 * Read the xfeatures which we copied (directly from the cpu or
	 * from the state in task struct) to the user buffers.
	 */
	err |= __get_user(xfeatures, (__u32 *)&x->header.xfeatures);

	/*
	 * For legacy compatible, we always set FP/SSE bits in the bit
	 * vector while saving the state to the user context. This will
	 * enable us capturing any changes(during sigreturn) to
	 * the FP/SSE bits by the legacy applications which don't touch
	 * xfeatures in the xsave header.
	 *
	 * xsave aware apps can change the xfeatures in the xsave
	 * header as well as change any contents in the memory layout.
	 * xrestore as part of sigreturn will capture all the changes.
	 */
	xfeatures |= XSTATE_FPSSE;

	err |= __put_user(xfeatures, (__u32 *)&x->header.xfeatures);

	return err;
}

258
/*
 * Save the live FPU registers directly to the user signal frame at 'buf',
 * using the most capable instruction available (xsave > fxsave > fsave).
 *
 * On a faulting user access the partially-written frame is cleared and
 * -EFAULT is returned; otherwise the copy routine's result is returned.
 */
static inline int copy_fpregs_to_sigframe(struct xsave_struct __user *buf)
{
	int ret;

	if (use_xsave())
		ret = copy_xregs_to_user(buf);
	else if (use_fxsr())
		ret = copy_fxregs_to_user((struct i387_fxsave_struct __user *) buf);
	else
		ret = copy_fregs_to_user((struct i387_fsave_struct __user *) buf);

	if (unlikely(ret) && __clear_user(buf, xstate_size))
		ret = -EFAULT;

	return ret;
}

/*
 * Save the fpu, extended register state to the user signal frame.
 *
 * 'buf_fx' is the 64-byte aligned pointer at which the [f|fx|x]save
 *  state is copied.
 *  'buf' points to the 'buf_fx' or to the fsave header followed by 'buf_fx'.
 *
 *	buf == buf_fx for 64-bit frames and 32-bit fsave frame.
 *	buf != buf_fx for 32-bit frames with fxstate.
 *
 * If the fpu, extended register state is live, save the state directly
 * to the user frame pointed by the aligned pointer 'buf_fx'. Otherwise,
 * copy the thread's fpu state to the user frame starting at 'buf_fx'.
 *
 * If this is a 32-bit frame with fxstate, put a fsave header before
 * the aligned state at 'buf_fx'.
 *
 * For [f]xsave state, update the SW reserved fields in the [f]xsave frame
 * indicating the absence/presence of the extended state to the user.
 */
int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
{
	struct xsave_struct *xsave = &current->thread.fpu.state.xsave;
	struct task_struct *tsk = current;
	int ia32_fxstate = (buf != buf_fx);

	/* The split-frame case only exists on 32-bit/compat kernels: */
	ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
			 config_enabled(CONFIG_IA32_EMULATION));

	if (!access_ok(VERIFY_WRITE, buf, size))
		return -EACCES;

	/* No FPU hardware: fall back to the soft-FPU state: */
	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_get(current, NULL, 0,
			sizeof(struct user_i387_ia32_struct), NULL,
			(struct _fpstate_ia32 __user *) buf) ? -1 : 1;

	if (fpregs_active()) {
		/* Save the live register state to the user directly. */
		if (copy_fpregs_to_sigframe(buf_fx))
			return -1;
		/* Update the thread's fxstate to save the fsave header. */
		if (ia32_fxstate)
			copy_fxregs_to_kernel(&tsk->thread.fpu);
	} else {
		/* Registers not live: copy the in-memory state, sanitized: */
		fpstate_sanitize_xstate(&tsk->thread.fpu);
		if (__copy_to_user(buf_fx, xsave, xstate_size))
			return -1;
	}

	/* Save the fsave header for the 32-bit frames. */
	if ((ia32_fxstate || !use_fxsr()) && save_fsave_header(tsk, buf))
		return -1;

	if (use_fxsr() && save_xstate_epilog(buf_fx, ia32_fxstate))
		return -1;

	return 0;
}
333

334 335 336
/*
 * Sanitize xstate that was just copied in from a user signal frame,
 * before it can be restored: clear reserved header bits, restrict
 * xfeatures to what the kernel enables, mask mxcsr, and rebuild the
 * fxsave image from the legacy ia32 env.
 */
static inline void
sanitize_restored_xstate(struct task_struct *tsk,
			 struct user_i387_ia32_struct *ia32_env,
			 u64 xfeatures, int fx_only)
{
	struct xsave_struct *xsave = &tsk->thread.fpu.state.xsave;
	struct xstate_header *header = &xsave->header;

	if (use_xsave()) {
		/* These bits must be zero. */
		memset(header->reserved, 0, 48);

		/*
		 * Init the state that is not present in the memory
		 * layout and not enabled by the OS.
		 */
		if (fx_only)
			header->xfeatures = XSTATE_FPSSE;
		else
			header->xfeatures &= (xfeatures_mask & xfeatures);
	}

	if (use_fxsr()) {
		/*
		 * mxcsr reserved bits must be masked to zero for security
		 * reasons.
		 */
		xsave->i387.mxcsr &= mxcsr_feature_mask;

		convert_to_fxsr(tsk, ia32_env);
	}
}

367
/*
 * Restore the extended state if present. Otherwise, restore the FP/SSE state.
 *
 * Returns nonzero if the user-space access faulted.
 */
static inline int copy_user_to_fpregs_zeroing(void __user *buf, u64 xbv, int fx_only)
{
	if (use_xsave()) {
		/* xrstor requires a 64-byte aligned buffer: */
		if ((unsigned long)buf % 64 || fx_only) {
			/* Init everything beyond FP/SSE, then fxrstor the rest: */
			u64 init_bv = xfeatures_mask & ~XSTATE_FPSSE;
			copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
			return copy_user_to_fxregs(buf);
		} else {
			/* Init only the features absent from the user frame: */
			u64 init_bv = xfeatures_mask & ~xbv;
			if (unlikely(init_bv))
				copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
			return copy_user_to_xregs(buf, xbv);
		}
	} else if (use_fxsr()) {
		return copy_user_to_fxregs(buf);
	} else
		return copy_user_to_fregs(buf);
}

389
/*
 * Restore FPU state from a signal frame.
 *
 * 'buf' points to the start of the frame, 'buf_fx' to its 64-byte
 * aligned [f|fx|x]save portion (buf != buf_fx only for 32-bit frames
 * with fxstate). 'size' is the total frame size to validate.
 * Returns 0 on success, negative/nonzero on failure.
 */
static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
{
	int ia32_fxstate = (buf != buf_fx);
	struct task_struct *tsk = current;
	struct fpu *fpu = &tsk->thread.fpu;
	int state_size = xstate_size;
	u64 xfeatures = 0;
	int fx_only = 0;

	/* The split-frame case only exists on 32-bit/compat kernels: */
	ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
			 config_enabled(CONFIG_IA32_EMULATION));

	/* A NULL frame means: reset to a clean FPU state: */
	if (!buf) {
		fpu__clear(fpu);
		return 0;
	}

	if (!access_ok(VERIFY_READ, buf, size))
		return -EACCES;

	fpu__activate_curr(fpu);

	/* No FPU hardware: restore into the soft-FPU state: */
	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_set(current, NULL,
				       0, sizeof(struct user_i387_ia32_struct),
				       NULL, buf) != 0;

	if (use_xsave()) {
		struct _fpx_sw_bytes fx_sw_user;
		if (unlikely(check_for_xstate(buf_fx, buf_fx, &fx_sw_user))) {
			/*
			 * Couldn't find the extended state information in the
			 * memory layout. Restore just the FP/SSE and init all
			 * the other extended state.
			 */
			state_size = sizeof(struct i387_fxsave_struct);
			fx_only = 1;
		} else {
			state_size = fx_sw_user.xstate_size;
			xfeatures = fx_sw_user.xfeatures;
		}
	}

	if (ia32_fxstate) {
		/*
		 * For 32-bit frames with fxstate, copy the user state to the
		 * thread's fpu state, reconstruct fxstate from the fsave
		 * header. Sanitize the copied state etc.
		 */
		struct fpu *fpu = &tsk->thread.fpu;
		struct user_i387_ia32_struct env;
		int err = 0;

		/*
		 * Drop the current fpu which clears fpu->fpstate_active. This ensures
		 * that any context-switch during the copy of the new state,
		 * avoids the intermediate state from getting restored/saved.
		 * Thus avoiding the new restored state from getting corrupted.
		 * We will be ready to restore/save the state only after
		 * fpu->fpstate_active is again set.
		 */
		fpu__drop(fpu);

		if (__copy_from_user(&fpu->state.xsave, buf_fx, state_size) ||
		    __copy_from_user(&env, buf, sizeof(env))) {
			/* Copy failed: fall back to a known-good init state: */
			fpstate_init(&fpu->state);
			err = -1;
		} else {
			sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
		}

		fpu->fpstate_active = 1;
		if (use_eager_fpu()) {
			preempt_disable();
			fpu__restore();
			preempt_enable();
		}

		return err;
	} else {
		/*
		 * For 64-bit frames and 32-bit fsave frames, restore the user
		 * state to the registers directly (with exceptions handled).
		 */
		user_fpu_begin();
		if (copy_user_to_fpregs_zeroing(buf_fx, xfeatures, fx_only)) {
			fpu__clear(fpu);
			return -1;
		}
	}

	return 0;
}

483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519
/*
 * Size of the [f|fx|x]save portion of a signal frame; xsave frames
 * additionally carry the trailing FP_XSTATE_MAGIC2 word.
 */
static inline int xstate_sigframe_size(void)
{
	int size = xstate_size;

	if (use_xsave())
		size += FP_XSTATE_MAGIC2_SIZE;

	return size;
}

/*
 * Restore FPU state from a sigframe:
 */
int fpu__restore_sig(void __user *buf, int ia32_frame)
{
	int frame_size = xstate_sigframe_size();
	void __user *buf_fx = buf;

	if (ia32_frame && use_fxsr()) {
		/* 32-bit fxstate frames carry an fsave header up front: */
		buf_fx = buf + sizeof(struct i387_fsave_struct);
		frame_size += sizeof(struct i387_fsave_struct);
	}

	return __fpu__restore_sig(buf, buf_fx, frame_size);
}

/*
 * Carve out room for the FPU state below stack pointer 'sp'.
 *
 * Returns the new (lowered) stack pointer; *buf_fx receives the
 * 64-byte aligned address for the [f|fx|x]save state and *size the
 * total frame size (including a leading fsave header for 32-bit
 * fxstate frames).
 */
unsigned long
fpu__alloc_mathframe(unsigned long sp, int ia32_frame,
		     unsigned long *buf_fx, unsigned long *size)
{
	unsigned long frame_size = xstate_sigframe_size();

	/* The aligned state area sits lowest; round down to 64 bytes: */
	*buf_fx = sp = round_down(sp - frame_size, 64);
	if (ia32_frame && use_fxsr()) {
		frame_size += sizeof(struct i387_fsave_struct);
		sp -= sizeof(struct i387_fsave_struct);
	}

	*size = frame_size;

	return sp;
}
520 521 522 523 524 525 526
/*
 * Prepare the SW reserved portion of the fxsave memory layout, indicating
 * the presence of the extended state information in the memory layout
 * pointed by the fpstate pointer in the sigcontext.
 * This will be saved when ever the FP and extended state context is
 * saved on the user stack during the signal handler delivery to the user.
 */
static void prepare_fx_sw_frame(void)
{
	int fsave_header_size = sizeof(struct i387_fsave_struct);
	int size = xstate_size + FP_XSTATE_MAGIC2_SIZE;

	if (config_enabled(CONFIG_X86_32))
		size += fsave_header_size;

	fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
	fx_sw_reserved.extended_size = size;
	fx_sw_reserved.xfeatures = xfeatures_mask;
	fx_sw_reserved.xstate_size = xstate_size;

	/* The ia32 variant additionally accounts for the fsave header: */
	if (config_enabled(CONFIG_IA32_EMULATION)) {
		fx_sw_reserved_ia32 = fx_sw_reserved;
		fx_sw_reserved_ia32.extended_size += fsave_header_size;
	}
}
545

546
/*
 * Enable the extended processor state save/restore feature.
 * Called once per CPU onlining.
 */
void fpu__init_cpu_xstate(void)
{
	if (!cpu_has_xsave || !xfeatures_mask)
		return;

	/* Enable the XSAVE instruction family, then the enabled features: */
	cr4_set_bits(X86_CR4_OSXSAVE);
	xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask);
}

559 560 561 562
/*
 * Record the offsets and sizes of different state managed by the xsave
 * memory layout.
 */
563
static void __init setup_xstate_features(void)
564 565 566
{
	int eax, ebx, ecx, edx, leaf = 0x2;

567
	xfeatures_nr = fls64(xfeatures_mask);
568 569

	do {
570
		cpuid_count(XSTATE_CPUID, leaf, &eax, &ebx, &ecx, &edx);
571 572 573 574 575 576 577 578 579 580 581

		if (eax == 0)
			break;

		xstate_offsets[leaf] = ebx;
		xstate_sizes[leaf] = eax;

		leaf++;
	} while (1);
}

582
/*
 * Print one supported xstate feature; silent if the mask is unsupported.
 */
static void print_xstate_feature(u64 xstate_mask)
{
	const char *feature_name;

	if (!cpu_has_xfeatures(xstate_mask, &feature_name))
		return;

	pr_info("x86/fpu: Supporting XSAVE feature 0x%02Lx: '%s'\n", xstate_mask, feature_name);
}

/*
 * Print out all the supported xstate features:
 */
static void print_xstate_features(void)
{
595 596 597 598 599 600 601 602
	print_xstate_feature(XSTATE_FP);
	print_xstate_feature(XSTATE_SSE);
	print_xstate_feature(XSTATE_YMM);
	print_xstate_feature(XSTATE_BNDREGS);
	print_xstate_feature(XSTATE_BNDCSR);
	print_xstate_feature(XSTATE_OPMASK);
	print_xstate_feature(XSTATE_ZMM_Hi256);
	print_xstate_feature(XSTATE_Hi16_ZMM);
603 604
}

605 606 607 608 609 610 611 612 613 614
/*
 * This function sets up offsets and sizes of all extended states in
 * xsave area. This supports both standard format and compacted format
 * of the xsave area.
 *
 * Input: void
 * Output: void
 */
void setup_xstate_comp(void)
{
	unsigned int xstate_comp_sizes[sizeof(xfeatures_mask)*8];
	int i;

	/*
	 * The FP xstates and SSE xstates are legacy states. They are always
	 * in the fixed offsets in the xsave area in either compacted form
	 * or standard form.
	 */
	xstate_comp_offsets[0] = 0;
	xstate_comp_offsets[1] = offsetof(struct i387_fxsave_struct, xmm_space);

	/* Without XSAVES the standard-format offsets apply directly: */
	if (!cpu_has_xsaves) {
		for (i = 2; i < xfeatures_nr; i++) {
			if (test_bit(i, (unsigned long *)&xfeatures_mask)) {
				xstate_comp_offsets[i] = xstate_offsets[i];
				xstate_comp_sizes[i] = xstate_sizes[i];
			}
		}
		return;
	}

	/* Compacted format: extended states are packed after the header: */
	xstate_comp_offsets[2] = FXSAVE_SIZE + XSAVE_HDR_SIZE;

	for (i = 2; i < xfeatures_nr; i++) {
		/* Disabled features occupy no space in the compacted layout: */
		if (test_bit(i, (unsigned long *)&xfeatures_mask))
			xstate_comp_sizes[i] = xstate_sizes[i];
		else
			xstate_comp_sizes[i] = 0;

		if (i > 2)
			xstate_comp_offsets[i] = xstate_comp_offsets[i-1]
					+ xstate_comp_sizes[i-1];

	}
}

651 652 653
/*
 * setup the xstate image representing the init state
 */
static void setup_init_fpu_buf(void)
{
	if (!cpu_has_xsave)
		return;

	setup_xstate_features();
	print_xstate_features();

	/* XSAVES images need a valid compaction header (bit 63 + mask): */
	if (cpu_has_xsaves) {
		init_fpstate.xsave.header.xcomp_bv = (u64)1 << 63 | xfeatures_mask;
		init_fpstate.xsave.header.xfeatures = xfeatures_mask;
	}

	/*
	 * Init all the features state with header_bv being 0x0
	 */
	copy_kernel_to_xregs_booting(&init_fpstate.xsave, -1);

	/*
	 * Dump the init state again. This is to identify the init state
	 * of any feature which is not represented by all zero's.
	 */
	copy_xregs_to_kernel_booting(&init_fpstate.xsave);
}

F
Fenghua Yu 已提交
679
/*
 * Calculate total size of enabled xstates in XCR0/xfeatures_mask.
 */
static void __init init_xstate_size(void)
{
	unsigned int eax, ebx, ecx, edx;
	int i;

	if (!cpu_has_xsaves) {
		/* CPUID 0x0D.0: EBX = size needed for the enabled features: */
		cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
		xstate_size = ebx;
		return;
	}

	/* Compacted format: legacy area + header, plus each enabled state: */
	xstate_size = FXSAVE_SIZE + XSAVE_HDR_SIZE;
	for (i = 2; i < 64; i++) {
		if (test_bit(i, (unsigned long *)&xfeatures_mask)) {
			cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);
			xstate_size += eax;
		}
	}
}

702 703
/*
 * Enable and initialize the xsave feature.
 * Called once per system bootup.
 *
 * ( Not marked __init because of false positive section warnings. )
 */
void fpu__init_system_xstate(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!cpu_has_xsave) {
		pr_info("x86/fpu: Legacy x87 FPU detected.\n");
		return;
	}

	if (boot_cpu_data.cpuid_level < XSTATE_CPUID) {
		WARN(1, "x86/fpu: XSTATE_CPUID missing!\n");
		return;
	}

	/* CPUID 0x0D.0: EAX:EDX = supported xfeatures bitmap: */
	cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
	xfeatures_mask = eax + ((u64)edx << 32);

	if ((xfeatures_mask & XSTATE_FPSSE) != XSTATE_FPSSE) {
		pr_err("x86/fpu: FP/SSE not present amongst the CPU's xstate features: 0x%llx.\n", xfeatures_mask);
		BUG();
	}

	/*
	 * Support only the state known to OS.
	 */
	xfeatures_mask = xfeatures_mask & XCNTXT_MASK;

	/* Enable xstate instructions to be able to continue with initialization: */
	fpu__init_cpu_xstate();

	/*
	 * Recompute the context size for enabled features
	 */
	init_xstate_size();

	update_regset_xstate_info(xstate_size, xfeatures_mask);
	prepare_fx_sw_frame();
	setup_init_fpu_buf();

	pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is 0x%x bytes, using '%s' format.\n",
		xfeatures_mask,
		xstate_size,
		cpu_has_xsaves ? "compacted" : "standard");
}
752

753 754 755 756 757 758 759 760 761 762 763 764
/*
 * Restore minimal FPU state after suspend:
 */
void fpu__resume_cpu(void)
{
	/*
	 * Restore XCR0 on xsave capable CPUs:
	 * (mirrors what fpu__init_cpu_xstate() does at CPU onlining)
	 */
	if (cpu_has_xsave)
		xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask);
}

765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781
/*
 * Given the xsave area and a state inside, this function returns the
 * address of the state.
 *
 * This is the API that is called to get xstate address in either
 * standard format or compacted format of xsave area.
 *
 * Inputs:
 *	xsave: base address of the xsave area;
 *	xstate: state which is defined in xsave.h (e.g. XSTATE_FP, XSTATE_SSE,
 *	etc.)
 * Output:
 *	address of the state in the xsave area, or NULL if the feature
 *	is not enabled in xfeatures_mask.
 */
void *get_xsave_addr(struct xsave_struct *xsave, int xstate)
{
	/* Convert the single-bit feature mask to its bit number: */
	int feature = fls64(xstate) - 1;
	if (!test_bit(feature, (unsigned long *)&xfeatures_mask))
		return NULL;

	return (void *)xsave + xstate_comp_offsets[feature];
}
EXPORT_SYMBOL_GPL(get_xsave_addr);