xstate.c 20.3 KB
Newer Older
1 2 3 4 5 6
/*
 * xsave/xrstor support.
 *
 * Author: Suresh Siddha <suresh.b.siddha@intel.com>
 */
#include <linux/compat.h>
F
Fenghua Yu 已提交
7
#include <linux/cpu.h>
8

9
#include <asm/fpu/api.h>
10
#include <asm/fpu/internal.h>
11
#include <asm/fpu/signal.h>
12
#include <asm/fpu/regset.h>
13
#include <asm/sigframe.h>
A
Andy Lutomirski 已提交
14
#include <asm/tlbflush.h>
15

16 17 18 19 20 21 22 23 24 25 26 27 28
/*
 * Human-readable names for the xstate features, indexed by the feature's
 * bit position in XCR0/xfeatures_mask. The last entry is the fallback
 * label used for any bit beyond the known features (see cpu_has_xfeatures()).
 */
static const char *xfeature_names[] =
{
	"x87 floating point registers"	,
	"SSE registers"			,
	"AVX registers"			,
	"MPX bounds registers"		,
	"MPX CSR"			,
	"AVX-512 opmask"		,
	"AVX-512 Hi256"			,
	"AVX-512 ZMM_Hi256"		,
	"unknown xstate feature"	,
};

29
/*
 * Mask of xstate features supported by the CPU and the kernel:
 */
u64 xfeatures_mask __read_mostly;

/*
 * Represents init state for the supported extended state.
 */
struct xsave_struct init_xstate_ctx;

/* sw_reserved templates for 64-bit and ia32 signal frames (see prepare_fx_sw_frame()). */
static struct _fpx_sw_bytes fx_sw_reserved, fx_sw_reserved_ia32;
/* Per-feature offset/size in the standard (non-compacted) xsave layout, from CPUID. */
static unsigned int xstate_offsets[XFEATURES_NR_MAX], xstate_sizes[XFEATURES_NR_MAX];
/* Per-feature offset in the compacted layout, computed by setup_xstate_comp(). */
static unsigned int xstate_comp_offsets[sizeof(xfeatures_mask)*8];

/* The number of supported xfeatures in xfeatures_mask: */
static unsigned int xfeatures_nr;

46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83
/*
 * Tell whether the system supports all of the requested xfeatures.
 *
 * Returns 1 when every bit of @xfeatures_needed is enabled in
 * xfeatures_mask, 0 otherwise.
 *
 * If @feature_name is non-NULL it is set to the name of the most
 * advanced (highest) feature bit involved - preferring a missing
 * feature, so that callers can print the exact culprit to users.
 */
int cpu_has_xfeatures(u64 xfeatures_needed, const char **feature_name)
{
	u64 missing = xfeatures_needed & ~xfeatures_mask;

	if (unlikely(feature_name)) {
		/*
		 * Name the highest requested-but-missing feature if there
		 * is one; otherwise name the highest requested feature.
		 */
		u64 report = missing ? missing : xfeatures_needed;
		long idx = fls64(report) - 1;
		long last = ARRAY_SIZE(xfeature_names) - 1;

		/* Clamp unknown bits to the "unknown xstate feature" entry: */
		if (idx > last)
			idx = last;

		*feature_name = xfeature_names[idx];
	}

	return missing ? 0 : 1;
}
EXPORT_SYMBOL_GPL(cpu_has_xfeatures);

84
/*
 * When executing XSAVEOPT (optimized XSAVE), if a processor implementation
 * detects that an FPU state component is still (or is again) in its
 * initialized state, it may clear the corresponding bit in the header.xfeatures
 * field, and can skip the writeout of registers to the corresponding memory layout.
 *
 * This means that when the bit is zero, the state component might still contain
 * some previous - non-initialized register state.
 *
 * Before writing xstate information to user-space we sanitize those components,
 * to always ensure that the memory layout of a feature will be in the init state
 * if the corresponding header bit is zero. This is to ensure that user-space doesn't
 * see some stale state in the memory layout during signal handling, debugging etc.
 */
void fpstate_sanitize_xstate(struct fpu *fpu)
{
	struct i387_fxsave_struct *fx = &fpu->state.fxsave;
	int feature_bit;
	u64 xfeatures;

	/* Only XSAVEOPT can leave components in the init state: */
	if (!use_xsaveopt())
		return;

	xfeatures = fpu->state.xsave.header.xfeatures;

	/*
	 * None of the feature bits are in init state. So nothing else
	 * to do for us, as the memory layout is up to date.
	 */
	if ((xfeatures & xfeatures_mask) == xfeatures_mask)
		return;

	/*
	 * FP is in init state: write out the architectural x87 init
	 * values (0x37f is the default FPU control word).
	 */
	if (!(xfeatures & XSTATE_FP)) {
		fx->cwd = 0x37f;
		fx->swd = 0;
		fx->twd = 0;
		fx->fop = 0;
		fx->rip = 0;
		fx->rdp = 0;
		/* Clear the 8 x87 data registers (8 * 16 bytes): */
		memset(&fx->st_space[0], 0, 128);
	}

	/*
	 * SSE is in init state: clear the 16 XMM registers (16 * 16 bytes).
	 */
	if (!(xfeatures & XSTATE_SSE))
		memset(&fx->xmm_space[0], 0, 256);

	/*
	 * First two features are FPU and SSE, which above we handled
	 * in a special way already:
	 */
	feature_bit = 0x2;
	/* Bits set here = enabled features whose header bit is in init state: */
	xfeatures = (xfeatures_mask & ~xfeatures) >> 2;

	/*
	 * Update all the remaining memory layouts according to their
	 * standard xstate layout, if their header bit is in the init
	 * state:
	 */
	while (xfeatures) {
		if (xfeatures & 0x1) {
			int offset = xstate_offsets[feature_bit];
			int size = xstate_sizes[feature_bit];

			/* Copy the feature's init image over the stale data: */
			memcpy((void *)fx + offset,
			       (void *)&init_xstate_ctx + offset,
			       size);
		}

		xfeatures >>= 1;
		feature_bit++;
	}
}

162 163 164 165
/*
 * Check for the presence of extended state information in the
 * user fpstate pointer in the sigcontext.
 *
 * @buf:     user pointer to the fxsave area in the signal frame
 * @fpstate: user pointer used to locate the trailing magic2 word
 * @fx_sw:   kernel buffer that receives the frame's sw_reserved bytes
 *
 * Returns 0 if a well-formed extended-state frame is present, -1 on a
 * failed user access or a malformed/legacy-only frame.
 */
static inline int check_for_xstate(struct i387_fxsave_struct __user *buf,
				   void __user *fpstate,
				   struct _fpx_sw_bytes *fx_sw)
{
	int min_xstate_size = sizeof(struct i387_fxsave_struct) +
			      sizeof(struct xstate_header);
	unsigned int magic2;

	if (__copy_from_user(fx_sw, &buf->sw_reserved[0], sizeof(*fx_sw)))
		return -1;

	/* Check for the first magic field and other error scenarios. */
	if (fx_sw->magic1 != FP_XSTATE_MAGIC1 ||
	    fx_sw->xstate_size < min_xstate_size ||
	    fx_sw->xstate_size > xstate_size ||
	    fx_sw->xstate_size > fx_sw->extended_size)
		return -1;

	/*
	 * Check for the presence of second magic word at the end of memory
	 * layout. This detects the case where the user just copied the legacy
	 * fpstate layout without copying the extended state information
	 * in the memory layout.
	 */
	if (__get_user(magic2, (__u32 __user *)(fpstate + fx_sw->xstate_size))
	    || magic2 != FP_XSTATE_MAGIC2)
		return -1;

	return 0;
}

197 198 199
/*
 * Signal frame handlers.
 */

/*
 * Write the legacy fsave header into the user signal frame at @buf.
 *
 * With fxsr, the in-kernel state is in fxsave format, so the legacy i387
 * environment is reconstructed via convert_from_fxsr() and copied out
 * together with the saved status word and the X86_FXSR_MAGIC cookie that
 * marks an fxsr-style frame. Without fxsr, the frame at @buf is already
 * in fsave format and only the 'status' mirror needs to be filled in.
 *
 * Returns 0 on success, -1 on a failed user-space access.
 */
static inline int save_fsave_header(struct task_struct *tsk, void __user *buf)
{
	if (use_fxsr()) {
		struct xsave_struct *xsave = &tsk->thread.fpu.state.xsave;
		struct user_i387_ia32_struct env;
		struct _fpstate_ia32 __user *fp = buf;

		convert_from_fxsr(&env, tsk);

		if (__copy_to_user(buf, &env, sizeof(env)) ||
		    __put_user(xsave->i387.swd, &fp->status) ||
		    __put_user(X86_FXSR_MAGIC, &fp->magic))
			return -1;
	} else {
		struct i387_fsave_struct __user *fp = buf;
		u32 swd;
		/* Mirror the frame's status word into fp->status: */
		if (__get_user(swd, &fp->swd) || __put_user(swd, &fp->status))
			return -1;
	}

	return 0;
}

/*
 * Finalize the [f]xsave area in the user signal frame:
 *
 *  - fill in the sw_reserved bytes that describe the extended state
 *    layout to user-space (and to sigreturn),
 *  - for xsave frames: append FP_XSTATE_MAGIC2 after the xstate and
 *    force the FP/SSE bits on in the header's xfeatures word.
 *
 * Returns 0 on success, non-zero if any user access failed.
 */
static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
{
	struct xsave_struct __user *x = buf;
	struct _fpx_sw_bytes *sw_bytes;
	u32 xfeatures;
	int err;

	/* Setup the bytes not touched by the [f]xsave and reserved for SW. */
	sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
	err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));

	if (!use_xsave())
		return err;

	/* Trailing magic lets sigreturn detect a complete xstate frame: */
	err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));

	/*
	 * Read the xfeatures which we copied (directly from the cpu or
	 * from the state in task struct) to the user buffers.
	 */
	err |= __get_user(xfeatures, (__u32 *)&x->header.xfeatures);

	/*
	 * For legacy compatible, we always set FP/SSE bits in the bit
	 * vector while saving the state to the user context. This will
	 * enable us capturing any changes(during sigreturn) to
	 * the FP/SSE bits by the legacy applications which don't touch
	 * xfeatures in the xsave header.
	 *
	 * xsave aware apps can change the xfeatures in the xsave
	 * header as well as change any contents in the memory layout.
	 * xrestore as part of sigreturn will capture all the changes.
	 */
	xfeatures |= XSTATE_FPSSE;

	err |= __put_user(xfeatures, (__u32 *)&x->header.xfeatures);

	return err;
}

263
/*
 * Dump the live FPU registers straight into the user signal frame,
 * using the most capable save instruction available.
 *
 * On failure the (possibly partially written) frame is cleared so that
 * user-space never sees a half-saved state; returns 0 on success, a
 * negative error otherwise.
 */
static inline int copy_fpregs_to_sigframe(struct xsave_struct __user *buf)
{
	int ret;

	if (use_xsave())
		ret = xsave_user(buf);
	else if (use_fxsr())
		ret = fxsave_user((struct i387_fxsave_struct __user *) buf);
	else
		ret = fsave_user((struct i387_fsave_struct __user *) buf);

	if (likely(!ret))
		return 0;

	/* Wipe the partial frame; report -EFAULT if even that fails: */
	if (__clear_user(buf, xstate_size))
		return -EFAULT;

	return ret;
}

/*
 * Save the fpu, extended register state to the user signal frame.
 *
 * 'buf_fx' is the 64-byte aligned pointer at which the [f|fx|x]save
 *  state is copied.
 *  'buf' points to the 'buf_fx' or to the fsave header followed by 'buf_fx'.
 *
 *	buf == buf_fx for 64-bit frames and 32-bit fsave frame.
 *	buf != buf_fx for 32-bit frames with fxstate.
 *
 * If the fpu, extended register state is live, save the state directly
 * to the user frame pointed by the aligned pointer 'buf_fx'. Otherwise,
 * copy the thread's fpu state to the user frame starting at 'buf_fx'.
 *
 * If this is a 32-bit frame with fxstate, put a fsave header before
 * the aligned state at 'buf_fx'.
 *
 * For [f]xsave state, update the SW reserved fields in the [f]xsave frame
 * indicating the absence/presence of the extended state to the user.
 */
int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
{
	struct xsave_struct *xsave = &current->thread.fpu.state.xsave;
	struct task_struct *tsk = current;
	int ia32_fxstate = (buf != buf_fx);

	/* Split frames only exist on 32-bit-capable kernels: */
	ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
			 config_enabled(CONFIG_IA32_EMULATION));

	if (!access_ok(VERIFY_WRITE, buf, size))
		return -EACCES;

	/* No FPU hardware: save the math-emulation state instead. */
	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_get(current, NULL, 0,
			sizeof(struct user_i387_ia32_struct), NULL,
			(struct _fpstate_ia32 __user *) buf) ? -1 : 1;

	if (fpregs_active()) {
		/* Save the live register state to the user directly. */
		if (copy_fpregs_to_sigframe(buf_fx))
			return -1;
		/* Update the thread's fxstate to save the fsave header. */
		if (ia32_fxstate)
			fpu_fxsave(&tsk->thread.fpu);
	} else {
		/* Registers are not live: copy the thread's saved state. */
		fpstate_sanitize_xstate(&tsk->thread.fpu);
		if (__copy_to_user(buf_fx, xsave, xstate_size))
			return -1;
	}

	/* Save the fsave header for the 32-bit frames. */
	if ((ia32_fxstate || !use_fxsr()) && save_fsave_header(tsk, buf))
		return -1;

	if (use_fxsr() && save_xstate_epilog(buf_fx, ia32_fxstate))
		return -1;

	return 0;
}
338

339 340 341
static inline void
sanitize_restored_xstate(struct task_struct *tsk,
			 struct user_i387_ia32_struct *ia32_env,
342
			 u64 xfeatures, int fx_only)
343
{
344
	struct xsave_struct *xsave = &tsk->thread.fpu.state.xsave;
345
	struct xstate_header *header = &xsave->header;
346

347 348
	if (use_xsave()) {
		/* These bits must be zero. */
349
		memset(header->reserved, 0, 48);
350 351

		/*
352 353
		 * Init the state that is not present in the memory
		 * layout and not enabled by the OS.
354
		 */
355
		if (fx_only)
356
			header->xfeatures = XSTATE_FPSSE;
357
		else
358
			header->xfeatures &= (xfeatures_mask & xfeatures);
359
	}
360

361
	if (use_fxsr()) {
362
		/*
363 364
		 * mscsr reserved bits must be masked to zero for security
		 * reasons.
365
		 */
366
		xsave->i387.mxcsr &= mxcsr_feature_mask;
367

368
		convert_to_fxsr(tsk, ia32_env);
369
	}
370 371
}

372
/*
 * Restore the extended state if present. Otherwise, restore the FP/SSE state.
 *
 * @buf:     user pointer to the [f]xsave frame
 * @xbv:     xfeatures bit-vector taken from the frame's sw_reserved area
 * @fx_only: non-zero to restore only the legacy FP/SSE part of the frame
 */
static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
{
	if (use_xsave()) {
		if ((unsigned long)buf % 64 || fx_only) {
			/*
			 * xrstor needs a 64-byte-aligned buffer: fall back to
			 * fxrstor for FP/SSE and bring every other enabled
			 * feature to its init state.
			 */
			u64 init_bv = xfeatures_mask & ~XSTATE_FPSSE;
			xrstor_state(&init_xstate_ctx, init_bv);
			return fxrstor_user(buf);
		} else {
			/*
			 * Init the enabled features the frame does not carry,
			 * then restore the carried features from user memory.
			 */
			u64 init_bv = xfeatures_mask & ~xbv;
			if (unlikely(init_bv))
				xrstor_state(&init_xstate_ctx, init_bv);
			return xrestore_user(buf, xbv);
		}
	} else if (use_fxsr()) {
		return fxrstor_user(buf);
	} else
		return frstor_user(buf);
}

394
/*
 * Restore FPU state from a user signal frame.
 *
 * @buf:    start of the user frame (possibly an fsave header for 32-bit
 *          fxstate frames)
 * @buf_fx: 64-byte aligned [f|fx|x]save area inside the frame
 *          (buf == buf_fx for 64-bit and 32-bit fsave frames)
 * @size:   total size of the frame at @buf
 *
 * Returns 0 on success, -EACCES/-1 on a bad frame.
 */
static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
{
	int ia32_fxstate = (buf != buf_fx);
	struct task_struct *tsk = current;
	struct fpu *fpu = &tsk->thread.fpu;
	int state_size = xstate_size;
	u64 xfeatures = 0;
	int fx_only = 0;

	/* Split frames only exist on 32-bit-capable kernels: */
	ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
			 config_enabled(CONFIG_IA32_EMULATION));

	/* A NULL frame pointer means: reset to a clean FPU state. */
	if (!buf) {
		fpu__clear(fpu);
		return 0;
	}

	if (!access_ok(VERIFY_READ, buf, size))
		return -EACCES;

	fpu__activate_curr(fpu);

	/* No FPU hardware: hand the frame to the math-emulation code. */
	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_set(current, NULL,
				       0, sizeof(struct user_i387_ia32_struct),
				       NULL, buf) != 0;

	if (use_xsave()) {
		struct _fpx_sw_bytes fx_sw_user;
		if (unlikely(check_for_xstate(buf_fx, buf_fx, &fx_sw_user))) {
			/*
			 * Couldn't find the extended state information in the
			 * memory layout. Restore just the FP/SSE and init all
			 * the other extended state.
			 */
			state_size = sizeof(struct i387_fxsave_struct);
			fx_only = 1;
		} else {
			state_size = fx_sw_user.xstate_size;
			xfeatures = fx_sw_user.xfeatures;
		}
	}

	if (ia32_fxstate) {
		/*
		 * For 32-bit frames with fxstate, copy the user state to the
		 * thread's fpu state, reconstruct fxstate from the fsave
		 * header. Sanitize the copied state etc.
		 */
		struct fpu *fpu = &tsk->thread.fpu;
		struct user_i387_ia32_struct env;
		int err = 0;

		/*
		 * Drop the current fpu which clears fpu->fpstate_active. This ensures
		 * that any context-switch during the copy of the new state,
		 * avoids the intermediate state from getting restored/saved.
		 * Thus avoiding the new restored state from getting corrupted.
		 * We will be ready to restore/save the state only after
		 * fpu->fpstate_active is again set.
		 */
		fpu__drop(fpu);

		if (__copy_from_user(&fpu->state.xsave, buf_fx, state_size) ||
		    __copy_from_user(&env, buf, sizeof(env))) {
			/* Bad frame: fall back to a clean init state. */
			fpstate_init(&fpu->state);
			err = -1;
		} else {
			sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
		}

		fpu->fpstate_active = 1;
		if (use_eager_fpu()) {
			/* Eager mode keeps registers live: reload them now. */
			preempt_disable();
			fpu__restore();
			preempt_enable();
		}

		return err;
	} else {
		/*
		 * For 64-bit frames and 32-bit fsave frames, restore the user
		 * state to the registers directly (with exceptions handled).
		 */
		user_fpu_begin();
		if (restore_user_xstate(buf_fx, xfeatures, fx_only)) {
			fpu__clear(fpu);
			return -1;
		}
	}

	return 0;
}

488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524
/*
 * Bytes needed for the [f|fx|x]save area of a signal frame; xsave
 * frames additionally carry the trailing FP_XSTATE_MAGIC2 word.
 */
static inline int xstate_sigframe_size(void)
{
	int size = xstate_size;

	if (use_xsave())
		size += FP_XSTATE_MAGIC2_SIZE;

	return size;
}

/*
 * Restore FPU state from a sigframe:
 */
int fpu__restore_sig(void __user *buf, int ia32_frame)
{
	int size = xstate_sigframe_size();
	void __user *buf_fx = buf;

	if (ia32_frame && use_fxsr()) {
		/* 32-bit fxstate frames have an fsave header up front: */
		size += sizeof(struct i387_fsave_struct);
		buf_fx = buf + sizeof(struct i387_fsave_struct);
	}

	return __fpu__restore_sig(buf, buf_fx, size);
}

/*
 * Carve out room for the math frame below @sp on the user stack.
 *
 * Returns the new stack pointer; *buf_fx gets the 64-byte aligned
 * [f|fx|x]save area and *size the total frame size (including the
 * fsave header for 32-bit fxsr frames).
 */
unsigned long
fpu__alloc_mathframe(unsigned long sp, int ia32_frame,
		     unsigned long *buf_fx, unsigned long *size)
{
	unsigned long fx_size = xstate_sigframe_size();
	unsigned long total = fx_size;

	/* The [f|fx|x]save area itself must be 64-byte aligned: */
	sp = round_down(sp - fx_size, 64);
	*buf_fx = sp;

	if (ia32_frame && use_fxsr()) {
		/* Unaligned fsave header goes below the aligned area: */
		total += sizeof(struct i387_fsave_struct);
		sp -= sizeof(struct i387_fsave_struct);
	}

	*size = total;

	return sp;
}
525 526 527 528 529 530 531
/*
 * Prepare the SW reserved portion of the fxsave memory layout, indicating
 * the presence of the extended state information in the memory layout
 * pointed by the fpstate pointer in the sigcontext.
 * This will be saved when ever the FP and extended state context is
 * saved on the user stack during the signal handler delivery to the user.
 */
R
roel kluin 已提交
532
static void prepare_fx_sw_frame(void)
533
{
534 535
	int fsave_header_size = sizeof(struct i387_fsave_struct);
	int size = xstate_size + FP_XSTATE_MAGIC2_SIZE;
536

537 538
	if (config_enabled(CONFIG_X86_32))
		size += fsave_header_size;
539 540

	fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
541
	fx_sw_reserved.extended_size = size;
542
	fx_sw_reserved.xfeatures = xfeatures_mask;
543 544
	fx_sw_reserved.xstate_size = xstate_size;

545 546 547 548 549
	if (config_enabled(CONFIG_IA32_EMULATION)) {
		fx_sw_reserved_ia32 = fx_sw_reserved;
		fx_sw_reserved_ia32.extended_size += fsave_header_size;
	}
}
550

551
/*
552 553
 * Enable the extended processor state save/restore feature.
 * Called once per CPU onlining.
554
 */
555
void fpu__init_cpu_xstate(void)
556
{
557
	if (!cpu_has_xsave || !xfeatures_mask)
558 559
		return;

A
Andy Lutomirski 已提交
560
	cr4_set_bits(X86_CR4_OSXSAVE);
561
	xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask);
562 563
}

564 565 566 567
/*
 * Record the offsets and sizes of different state managed by the xsave
 * memory layout.
 */
static void __init setup_xstate_features(void)
{
	int eax, ebx, ecx, edx, leaf = 0x2;

	/* Index of the highest enabled feature bit, plus one: */
	xfeatures_nr = fls64(xfeatures_mask);

	do {
		/* CPUID(0xd, leaf): EAX = size, EBX = offset of state 'leaf'. */
		cpuid_count(XSTATE_CPUID, leaf, &eax, &ebx, &ecx, &edx);

		/*
		 * A zero size ends the scan. NOTE(review): this assumes the
		 * enabled feature bits are contiguous from bit 2 upwards - a
		 * hole in xfeatures_mask would terminate the scan early;
		 * confirm against the features permitted by XCNTXT_MASK.
		 */
		if (eax == 0)
			break;

		xstate_offsets[leaf] = ebx;
		xstate_sizes[leaf] = eax;

		leaf++;
	} while (1);
}

587
/* Print one supported xstate feature, if the CPU and kernel enable it. */
static void print_xstate_feature(u64 xstate_mask)
{
	const char *feature_name;

	if (!cpu_has_xfeatures(xstate_mask, &feature_name))
		return;

	pr_info("x86/fpu: Supporting XSAVE feature 0x%02Lx: '%s'\n", xstate_mask, feature_name);
}

/*
 * Print out all the supported xstate features:
 */
static void print_xstate_features(void)
{
600 601 602 603 604 605 606 607
	print_xstate_feature(XSTATE_FP);
	print_xstate_feature(XSTATE_SSE);
	print_xstate_feature(XSTATE_YMM);
	print_xstate_feature(XSTATE_BNDREGS);
	print_xstate_feature(XSTATE_BNDCSR);
	print_xstate_feature(XSTATE_OPMASK);
	print_xstate_feature(XSTATE_ZMM_Hi256);
	print_xstate_feature(XSTATE_Hi16_ZMM);
608 609
}

610 611 612 613 614 615 616 617 618 619
/*
 * This function sets up offsets and sizes of all extended states in
 * xsave area. This supports both standard format and compacted format
 * of the xsave area.
 *
 * Input: void
 * Output: void
 */
void setup_xstate_comp(void)
{
	unsigned int xstate_comp_sizes[sizeof(xfeatures_mask)*8];
	int i;

	/*
	 * The FP xstates and SSE xstates are legacy states. They are always
	 * in the fixed offsets in the xsave area in either compacted form
	 * or standard form.
	 */
	xstate_comp_offsets[0] = 0;
	xstate_comp_offsets[1] = offsetof(struct i387_fxsave_struct, xmm_space);

	if (!cpu_has_xsaves) {
		/*
		 * Standard format: the compacted offsets are simply the
		 * CPUID-reported fixed offsets.
		 */
		for (i = 2; i < xfeatures_nr; i++) {
			if (test_bit(i, (unsigned long *)&xfeatures_mask)) {
				xstate_comp_offsets[i] = xstate_offsets[i];
				xstate_comp_sizes[i] = xstate_sizes[i];
			}
		}
		return;
	}

	/* Compacted format: features are packed right after the header. */
	xstate_comp_offsets[2] = FXSAVE_SIZE + XSAVE_HDR_SIZE;

	for (i = 2; i < xfeatures_nr; i++) {
		/* Disabled features occupy no space in the compacted layout: */
		if (test_bit(i, (unsigned long *)&xfeatures_mask))
			xstate_comp_sizes[i] = xstate_sizes[i];
		else
			xstate_comp_sizes[i] = 0;

		if (i > 2)
			xstate_comp_offsets[i] = xstate_comp_offsets[i-1]
					+ xstate_comp_sizes[i-1];

	}
}

656 657 658
/*
 * setup the xstate image representing the init state
 */
static void setup_init_fpu_buf(void)
{
	if (!cpu_has_xsave)
		return;

	setup_xstate_features();
	print_xstate_features();

	if (cpu_has_xsaves) {
		/* Bit 63 of xcomp_bv flags the compacted (XSAVES) format: */
		init_xstate_ctx.header.xcomp_bv = (u64)1 << 63 | xfeatures_mask;
		init_xstate_ctx.header.xfeatures = xfeatures_mask;
	}

	/*
	 * Init all the features state with header_bv being 0x0
	 */
	xrstor_state_booting(&init_xstate_ctx, -1);

	/*
	 * Dump the init state again. This is to identify the init state
	 * of any feature which is not represented by all zero's.
	 */
	xsave_state_booting(&init_xstate_ctx);
}

F
Fenghua Yu 已提交
684
/*
 * Calculate total size of enabled xstates in XCR0/xfeatures_mask.
 */
static void __init init_xstate_size(void)
{
	unsigned int eax, ebx, ecx, edx;
	int i;

	if (!cpu_has_xsaves) {
		/* Standard format: CPUID(0xd, 0).EBX is the total size. */
		cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
		xstate_size = ebx;
		return;
	}

	/* Compacted format: legacy area + header, plus each enabled state. */
	xstate_size = FXSAVE_SIZE + XSAVE_HDR_SIZE;
	for (i = 2; i < 64; i++) {
		if (!test_bit(i, (unsigned long *)&xfeatures_mask))
			continue;

		cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);
		xstate_size += eax;
	}
}

707 708
/*
 * Enable and initialize the xsave feature.
 * Called once per system bootup.
 *
 * ( Not marked __init because of false positive section warnings. )
 */
void fpu__init_system_xstate(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!cpu_has_xsave) {
		pr_info("x86/fpu: Legacy x87 FPU detected.\n");
		return;
	}

	if (boot_cpu_data.cpuid_level < XSTATE_CPUID) {
		/* xsave advertised but CPUID leaf 0xd is missing - firmware/CPU bug: */
		WARN(1, "x86/fpu: XSTATE_CPUID missing!\n");
		return;
	}

	/* CPUID(0xd, 0): EDX:EAX is the mask of CPU-supported features. */
	cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
	xfeatures_mask = eax + ((u64)edx << 32);

	if ((xfeatures_mask & XSTATE_FPSSE) != XSTATE_FPSSE) {
		pr_err("x86/fpu: FP/SSE not present amongst the CPU's xstate features: 0x%llx.\n", xfeatures_mask);
		BUG();
	}

	/*
	 * Support only the state known to OS.
	 */
	xfeatures_mask = xfeatures_mask & XCNTXT_MASK;

	/* Enable xstate instructions to be able to continue with initialization: */
	fpu__init_cpu_xstate();

	/*
	 * Recompute the context size for enabled features
	 */
	init_xstate_size();

	update_regset_xstate_info(xstate_size, xfeatures_mask);
	prepare_fx_sw_frame();
	setup_init_fpu_buf();

	pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is 0x%x bytes, using '%s' format.\n",
		xfeatures_mask,
		xstate_size,
		cpu_has_xsaves ? "compacted" : "standard");
}
757

758 759 760 761 762 763 764 765 766 767 768 769
/*
 * Restore minimal FPU state after suspend:
 */
void fpu__resume_cpu(void)
{
	if (!cpu_has_xsave)
		return;

	/* Re-enable our feature set in XCR0 on xsave capable CPUs: */
	xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask);
}

770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786
/*
 * Given the xsave area and a state inside, this function returns the
 * address of the state.
 *
 * This is the API that is called to get xstate address in either
 * standard format or compacted format of xsave area.
 *
 * Inputs:
 *	xsave: base address of the xsave area;
 *	xstate: state which is defined in xsave.h (e.g. XSTATE_FP, XSTATE_SSE,
 *	etc.)
 * Output:
 *	address of the state in the xsave area.
 */
void *get_xsave_addr(struct xsave_struct *xsave, int xstate)
{
	int feature = fls64(xstate) - 1;
787
	if (!test_bit(feature, (unsigned long *)&xfeatures_mask))
788 789 790 791
		return NULL;

	return (void *)xsave + xstate_comp_offsets[feature];
}
P
Paolo Bonzini 已提交
792
EXPORT_SYMBOL_GPL(get_xsave_addr);