/*
 * xsave/xrstor support.
 *
 * Author: Suresh Siddha <suresh.b.siddha@intel.com>
 */
#include <linux/compat.h>
#include <linux/cpu.h>

#include <asm/fpu/api.h>
#include <asm/fpu/internal.h>
#include <asm/sigframe.h>
#include <asm/tlbflush.h>
#include <asm/xcr.h>

/*
 * Human-readable names for the xstate feature bits, indexed by
 * feature bit number. The final "unknown" entry is a catch-all
 * used by cpu_has_xfeatures() for bits beyond this table.
 */
static const char *xfeature_names[] =
{
	"x87 floating point registers"	,
	"SSE registers"			,
	"AVX registers"			,
	"MPX bounds registers"		,
	"MPX CSR"			,
	"AVX-512 opmask"		,
	"AVX-512 Hi256"			,
	"AVX-512 ZMM_Hi256"		,
	"unknown xstate feature"	,
};

27
/*
28
 * Mask of xstate features supported by the CPU and the kernel:
29
 */
30
u64 xfeatures_mask __read_mostly;
31

32 33 34
/*
 * Represents init state for the supported extended state.
 */
35
struct xsave_struct init_xstate_ctx;
36

37
static struct _fpx_sw_bytes fx_sw_reserved, fx_sw_reserved_ia32;
38
static unsigned int xstate_offsets[XFEATURES_NR_MAX], xstate_sizes[XFEATURES_NR_MAX];
39
static unsigned int xstate_comp_offsets[sizeof(xfeatures_mask)*8];
40 41 42

/* The number of supported xfeatures in xfeatures_mask: */
static unsigned int xfeatures_nr;
43

44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81
/*
 * Return whether the system supports a given xfeature.
 *
 * Also return the name of the (most advanced) feature that the caller requested:
 */
int cpu_has_xfeatures(u64 xfeatures_needed, const char **feature_name)
{
	u64 xfeatures_missing = xfeatures_needed & ~xfeatures_mask;

	if (unlikely(feature_name)) {
		long xfeature_idx, max_idx;
		u64 xfeatures_print;
		/*
		 * So we use FLS here to be able to print the most advanced
		 * feature that was requested but is missing. So if a driver
		 * asks about "XSTATE_SSE | XSTATE_YMM" we'll print the
		 * missing AVX feature - this is the most informative message
		 * to users:
		 */
		if (xfeatures_missing)
			xfeatures_print = xfeatures_missing;
		else
			xfeatures_print = xfeatures_needed;

		xfeature_idx = fls64(xfeatures_print)-1;
		max_idx = ARRAY_SIZE(xfeature_names)-1;
		xfeature_idx = min(xfeature_idx, max_idx);

		*feature_name = xfeature_names[xfeature_idx];
	}

	if (xfeatures_missing)
		return 0;

	return 1;
}
EXPORT_SYMBOL_GPL(cpu_has_xfeatures);

82
/*
83 84 85 86 87 88 89 90 91 92 93 94
 * When executing XSAVEOPT (optimized XSAVE), if a processor implementation
 * detects that an FPU state component is still (or is again) in its
 * initialized state, it may clear the corresponding bit in the header.xfeatures
 * field, and can skip the writeout of registers to the corresponding memory layout.
 *
 * This means that when the bit is zero, the state component might still contain
 * some previous - non-initialized register state.
 *
 * Before writing xstate information to user-space we sanitize those components,
 * to always ensure that the memory layout of a feature will be in the init state
 * if the corresponding header bit is zero. This is to ensure that user-space doesn't
 * see some stale state in the memory layout during signal handling, debugging etc.
95 96 97
 */
void __sanitize_i387_state(struct task_struct *tsk)
{
98
	struct i387_fxsave_struct *fx = &tsk->thread.fpu.state.fxsave;
99
	int feature_bit;
100
	u64 xfeatures;
101 102 103 104

	if (!fx)
		return;

105
	xfeatures = tsk->thread.fpu.state.xsave.header.xfeatures;
106 107 108

	/*
	 * None of the feature bits are in init state. So nothing else
L
Lucas De Marchi 已提交
109
	 * to do for us, as the memory layout is up to date.
110
	 */
111
	if ((xfeatures & xfeatures_mask) == xfeatures_mask)
112 113 114 115 116
		return;

	/*
	 * FP is in init state
	 */
117
	if (!(xfeatures & XSTATE_FP)) {
118 119 120 121 122 123 124 125 126 127 128 129
		fx->cwd = 0x37f;
		fx->swd = 0;
		fx->twd = 0;
		fx->fop = 0;
		fx->rip = 0;
		fx->rdp = 0;
		memset(&fx->st_space[0], 0, 128);
	}

	/*
	 * SSE is in init state
	 */
130
	if (!(xfeatures & XSTATE_SSE))
131 132
		memset(&fx->xmm_space[0], 0, 256);

133 134 135 136 137
	/*
	 * First two features are FPU and SSE, which above we handled
	 * in a special way already:
	 */
	feature_bit = 0x2;
138
	xfeatures = (xfeatures_mask & ~xfeatures) >> 2;
139 140

	/*
141 142 143
	 * Update all the remaining memory layouts according to their
	 * standard xstate layout, if their header bit is in the init
	 * state:
144
	 */
145 146
	while (xfeatures) {
		if (xfeatures & 0x1) {
147 148 149
			int offset = xstate_offsets[feature_bit];
			int size = xstate_sizes[feature_bit];

150
			memcpy((void *)fx + offset,
151
			       (void *)&init_xstate_ctx + offset,
152 153 154
			       size);
		}

155
		xfeatures >>= 1;
156 157 158 159
		feature_bit++;
	}
}

160 161 162 163
/*
 * Check for the presence of extended state information in the
 * user fpstate pointer in the sigcontext.
 */
164 165 166
static inline int check_for_xstate(struct i387_fxsave_struct __user *buf,
				   void __user *fpstate,
				   struct _fpx_sw_bytes *fx_sw)
167 168
{
	int min_xstate_size = sizeof(struct i387_fxsave_struct) +
169
			      sizeof(struct xstate_header);
170 171
	unsigned int magic2;

172 173
	if (__copy_from_user(fx_sw, &buf->sw_reserved[0], sizeof(*fx_sw)))
		return -1;
174

175 176 177 178 179 180
	/* Check for the first magic field and other error scenarios. */
	if (fx_sw->magic1 != FP_XSTATE_MAGIC1 ||
	    fx_sw->xstate_size < min_xstate_size ||
	    fx_sw->xstate_size > xstate_size ||
	    fx_sw->xstate_size > fx_sw->extended_size)
		return -1;
181 182 183 184 185 186 187

	/*
	 * Check for the presence of second magic word at the end of memory
	 * layout. This detects the case where the user just copied the legacy
	 * fpstate layout with out copying the extended state information
	 * in the memory layout.
	 */
188 189 190
	if (__get_user(magic2, (__u32 __user *)(fpstate + fx_sw->xstate_size))
	    || magic2 != FP_XSTATE_MAGIC2)
		return -1;
191 192 193 194

	return 0;
}

195 196 197
/*
 * Signal frame handlers.
 */
198 199 200
static inline int save_fsave_header(struct task_struct *tsk, void __user *buf)
{
	if (use_fxsr()) {
201
		struct xsave_struct *xsave = &tsk->thread.fpu.state.xsave;
202 203
		struct user_i387_ia32_struct env;
		struct _fpstate_ia32 __user *fp = buf;
204

205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221
		convert_from_fxsr(&env, tsk);

		if (__copy_to_user(buf, &env, sizeof(env)) ||
		    __put_user(xsave->i387.swd, &fp->status) ||
		    __put_user(X86_FXSR_MAGIC, &fp->magic))
			return -1;
	} else {
		struct i387_fsave_struct __user *fp = buf;
		u32 swd;
		if (__get_user(swd, &fp->swd) || __put_user(swd, &fp->status))
			return -1;
	}

	return 0;
}

/*
 * Finish off an [f]xsave signal frame: fill in the software-reserved
 * bytes, the trailing FP_XSTATE_MAGIC2 word, and force the FP/SSE bits
 * on in the user-visible xfeatures field.
 */
static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
{
	struct xsave_struct __user *x = buf;
	struct _fpx_sw_bytes *sw_bytes;
	u32 xfeatures;
	int err;

	/* Setup the bytes not touched by the [f]xsave and reserved for SW. */
	sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
	err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));

	if (!use_xsave())
		return err;

	err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));

	/*
	 * Read the xfeatures which we copied (directly from the cpu or
	 * from the state in task struct) to the user buffers.
	 */
	err |= __get_user(xfeatures, (__u32 *)&x->header.xfeatures);

	/*
	 * For legacy compatible, we always set FP/SSE bits in the bit
	 * vector while saving the state to the user context. This will
	 * enable us capturing any changes(during sigreturn) to
	 * the FP/SSE bits by the legacy applications which don't touch
	 * xfeatures in the xsave header.
	 *
	 * xsave aware apps can change the xfeatures in the xsave
	 * header as well as change any contents in the memory layout.
	 * xrestore as part of sigreturn will capture all the changes.
	 */
	xfeatures |= XSTATE_FPSSE;

	err |= __put_user(xfeatures, (__u32 *)&x->header.xfeatures);

	return err;
}

/*
 * Dump the live FPU register state straight into the user-space buffer,
 * using the most capable save instruction available. On failure the
 * whole buffer is cleared so no stale kernel data is leaked.
 */
static inline int save_user_xstate(struct xsave_struct __user *buf)
{
	int ret;

	if (use_xsave())
		ret = xsave_user(buf);
	else if (use_fxsr())
		ret = fxsave_user((struct i387_fxsave_struct __user *) buf);
	else
		ret = fsave_user((struct i387_fsave_struct __user *) buf);

	if (likely(!ret))
		return ret;

	/* Don't leave a partially-written frame behind: */
	if (__clear_user(buf, xstate_size))
		ret = -EFAULT;

	return ret;
}

/*
 * Save the fpu, extended register state to the user signal frame.
 *
 * 'buf_fx' is the 64-byte aligned pointer at which the [f|fx|x]save
 *  state is copied.
 *  'buf' points to the 'buf_fx' or to the fsave header followed by 'buf_fx'.
 *
 *	buf == buf_fx for 64-bit frames and 32-bit fsave frame.
 *	buf != buf_fx for 32-bit frames with fxstate.
 *
 * If the fpu, extended register state is live, save the state directly
 * to the user frame pointed by the aligned pointer 'buf_fx'. Otherwise,
 * copy the thread's fpu state to the user frame starting at 'buf_fx'.
 *
 * If this is a 32-bit frame with fxstate, put a fsave header before
 * the aligned state at 'buf_fx'.
 *
 * For [f]xsave state, update the SW reserved fields in the [f]xsave frame
 * indicating the absence/presence of the extended state to the user.
 */
int save_xstate_sig(void __user *buf, void __user *buf_fx, int size)
{
	struct xsave_struct *xsave = &current->thread.fpu.state.xsave;
	struct task_struct *tsk = current;
	int ia32_fxstate = (buf != buf_fx);

	ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
			 config_enabled(CONFIG_IA32_EMULATION));

	if (!access_ok(VERIFY_WRITE, buf, size))
		return -EACCES;

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_get(current, NULL, 0,
			sizeof(struct user_i387_ia32_struct), NULL,
			(struct _fpstate_ia32 __user *) buf) ? -1 : 1;

	if (user_has_fpu()) {
		/* Save the live register state to the user directly. */
		if (save_user_xstate(buf_fx))
			return -1;
		/* Update the thread's fxstate to save the fsave header. */
		if (ia32_fxstate)
			fpu_fxsave(&tsk->thread.fpu);
	} else {
		sanitize_i387_state(tsk);
		if (__copy_to_user(buf_fx, xsave, xstate_size))
			return -1;
	}

	/* Save the fsave header for the 32-bit frames. */
	if ((ia32_fxstate || !use_fxsr()) && save_fsave_header(tsk, buf))
		return -1;

	if (use_fxsr() && save_xstate_epilog(buf_fx, ia32_fxstate))
		return -1;

	return 0;
}
336

337 338 339
static inline void
sanitize_restored_xstate(struct task_struct *tsk,
			 struct user_i387_ia32_struct *ia32_env,
340
			 u64 xfeatures, int fx_only)
341
{
342
	struct xsave_struct *xsave = &tsk->thread.fpu.state.xsave;
343
	struct xstate_header *header = &xsave->header;
344

345 346
	if (use_xsave()) {
		/* These bits must be zero. */
347
		memset(header->reserved, 0, 48);
348 349

		/*
350 351
		 * Init the state that is not present in the memory
		 * layout and not enabled by the OS.
352
		 */
353
		if (fx_only)
354
			header->xfeatures = XSTATE_FPSSE;
355
		else
356
			header->xfeatures &= (xfeatures_mask & xfeatures);
357
	}
358

359
	if (use_fxsr()) {
360
		/*
361 362
		 * mscsr reserved bits must be masked to zero for security
		 * reasons.
363
		 */
364
		xsave->i387.mxcsr &= mxcsr_feature_mask;
365

366
		convert_to_fxsr(tsk, ia32_env);
367
	}
368 369
}

370
/*
371
 * Restore the extended state if present. Otherwise, restore the FP/SSE state.
372
 */
373
static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
374
{
375 376
	if (use_xsave()) {
		if ((unsigned long)buf % 64 || fx_only) {
377
			u64 init_bv = xfeatures_mask & ~XSTATE_FPSSE;
378
			xrstor_state(&init_xstate_ctx, init_bv);
379
			return fxrstor_user(buf);
380
		} else {
381
			u64 init_bv = xfeatures_mask & ~xbv;
382
			if (unlikely(init_bv))
383
				xrstor_state(&init_xstate_ctx, init_bv);
384 385 386
			return xrestore_user(buf, xbv);
		}
	} else if (use_fxsr()) {
387
		return fxrstor_user(buf);
388
	} else
389
		return frstor_user(buf);
390 391
}

392
int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
393
{
394
	int ia32_fxstate = (buf != buf_fx);
395
	struct task_struct *tsk = current;
396
	struct fpu *fpu = &tsk->thread.fpu;
397
	int state_size = xstate_size;
398
	u64 xfeatures = 0;
399 400 401 402
	int fx_only = 0;

	ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
			 config_enabled(CONFIG_IA32_EMULATION));
403 404

	if (!buf) {
405
		fpu_reset_state(fpu);
406
		return 0;
407 408 409 410 411
	}

	if (!access_ok(VERIFY_READ, buf, size))
		return -EACCES;

412
	fpu__activate_curr(fpu);
413

414
	if (!static_cpu_has(X86_FEATURE_FPU))
415 416 417
		return fpregs_soft_set(current, NULL,
				       0, sizeof(struct user_i387_ia32_struct),
				       NULL, buf) != 0;
418

419 420 421 422 423 424 425 426 427 428 429 430
	if (use_xsave()) {
		struct _fpx_sw_bytes fx_sw_user;
		if (unlikely(check_for_xstate(buf_fx, buf_fx, &fx_sw_user))) {
			/*
			 * Couldn't find the extended state information in the
			 * memory layout. Restore just the FP/SSE and init all
			 * the other extended state.
			 */
			state_size = sizeof(struct i387_fxsave_struct);
			fx_only = 1;
		} else {
			state_size = fx_sw_user.xstate_size;
431
			xfeatures = fx_sw_user.xfeatures;
432 433 434 435 436 437 438 439 440
		}
	}

	if (ia32_fxstate) {
		/*
		 * For 32-bit frames with fxstate, copy the user state to the
		 * thread's fpu state, reconstruct fxstate from the fsave
		 * header. Sanitize the copied state etc.
		 */
441
		struct fpu *fpu = &tsk->thread.fpu;
442
		struct user_i387_ia32_struct env;
443
		int err = 0;
444

445
		/*
446
		 * Drop the current fpu which clears fpu->fpstate_active. This ensures
447 448 449 450
		 * that any context-switch during the copy of the new state,
		 * avoids the intermediate state from getting restored/saved.
		 * Thus avoiding the new restored state from getting corrupted.
		 * We will be ready to restore/save the state only after
451
		 * fpu->fpstate_active is again set.
452
		 */
453
		drop_fpu(fpu);
454

455
		if (__copy_from_user(&fpu->state.xsave, buf_fx, state_size) ||
456
		    __copy_from_user(&env, buf, sizeof(env))) {
457
			fpstate_init(fpu);
458 459
			err = -1;
		} else {
460
			sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
461
		}
462

463
		fpu->fpstate_active = 1;
464 465
		if (use_eager_fpu()) {
			preempt_disable();
466
			fpu__restore();
467 468
			preempt_enable();
		}
469 470

		return err;
471
	} else {
472
		/*
473 474
		 * For 64-bit frames and 32-bit fsave frames, restore the user
		 * state to the registers directly (with exceptions handled).
475
		 */
476
		user_fpu_begin();
477
		if (restore_user_xstate(buf_fx, xfeatures, fx_only)) {
478
			fpu_reset_state(fpu);
479 480
			return -1;
		}
481
	}
482 483

	return 0;
484 485
}

486 487 488 489 490 491 492
/*
 * Prepare the SW reserved portion of the fxsave memory layout, indicating
 * the presence of the extended state information in the memory layout
 * pointed by the fpstate pointer in the sigcontext.
 * This will be saved when ever the FP and extended state context is
 * saved on the user stack during the signal handler delivery to the user.
 */
R
roel kluin 已提交
493
static void prepare_fx_sw_frame(void)
494
{
495 496
	int fsave_header_size = sizeof(struct i387_fsave_struct);
	int size = xstate_size + FP_XSTATE_MAGIC2_SIZE;
497

498 499
	if (config_enabled(CONFIG_X86_32))
		size += fsave_header_size;
500 501

	fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
502
	fx_sw_reserved.extended_size = size;
503
	fx_sw_reserved.xfeatures = xfeatures_mask;
504 505
	fx_sw_reserved.xstate_size = xstate_size;

506 507 508 509 510
	if (config_enabled(CONFIG_IA32_EMULATION)) {
		fx_sw_reserved_ia32 = fx_sw_reserved;
		fx_sw_reserved_ia32.extended_size += fsave_header_size;
	}
}
511

512
/*
513 514
 * Enable the extended processor state save/restore feature.
 * Called once per CPU onlining.
515
 */
516
void fpu__init_cpu_xstate(void)
517
{
518
	if (!cpu_has_xsave || !xfeatures_mask)
519 520
		return;

A
Andy Lutomirski 已提交
521
	cr4_set_bits(X86_CR4_OSXSAVE);
522
	xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask);
523 524
}

525 526 527 528
/*
 * Record the offsets and sizes of different state managed by the xsave
 * memory layout.
 */
529
static void __init setup_xstate_features(void)
530 531 532
{
	int eax, ebx, ecx, edx, leaf = 0x2;

533
	xfeatures_nr = fls64(xfeatures_mask);
534 535

	do {
536
		cpuid_count(XSTATE_CPUID, leaf, &eax, &ebx, &ecx, &edx);
537 538 539 540 541 542 543 544 545 546 547

		if (eax == 0)
			break;

		xstate_offsets[leaf] = ebx;
		xstate_sizes[leaf] = eax;

		leaf++;
	} while (1);
}

548 549
static void print_xstate_feature(u64 xstate_mask, const char *desc)
{
550
	if (xfeatures_mask & xstate_mask) {
551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571
		int xstate_feature = fls64(xstate_mask)-1;

		pr_info("x86/fpu: Supporting XSAVE feature %2d: '%s'\n", xstate_feature, desc);
	}
}

/*
 * Print out all the supported xstate features:
 */
static void print_xstate_features(void)
{
	print_xstate_feature(XSTATE_FP,		"x87 floating point registers");
	print_xstate_feature(XSTATE_SSE,	"SSE registers");
	print_xstate_feature(XSTATE_YMM,	"AVX registers");
	print_xstate_feature(XSTATE_BNDREGS,	"MPX bounds registers");
	print_xstate_feature(XSTATE_BNDCSR,	"MPX CSR");
	print_xstate_feature(XSTATE_OPMASK,	"AVX-512 opmask");
	print_xstate_feature(XSTATE_ZMM_Hi256,	"AVX-512 Hi256");
	print_xstate_feature(XSTATE_Hi16_ZMM,	"AVX-512 ZMM_Hi256");
}

572 573 574 575 576 577 578 579 580 581
/*
 * This function sets up offsets and sizes of all extended states in
 * xsave area. This supports both standard format and compacted format
 * of the xsave aread.
 *
 * Input: void
 * Output: void
 */
void setup_xstate_comp(void)
{
582
	unsigned int xstate_comp_sizes[sizeof(xfeatures_mask)*8];
583 584
	int i;

585 586 587 588 589 590 591
	/*
	 * The FP xstates and SSE xstates are legacy states. They are always
	 * in the fixed offsets in the xsave area in either compacted form
	 * or standard form.
	 */
	xstate_comp_offsets[0] = 0;
	xstate_comp_offsets[1] = offsetof(struct i387_fxsave_struct, xmm_space);
592 593

	if (!cpu_has_xsaves) {
594
		for (i = 2; i < xfeatures_nr; i++) {
595
			if (test_bit(i, (unsigned long *)&xfeatures_mask)) {
596 597 598 599 600 601 602 603 604
				xstate_comp_offsets[i] = xstate_offsets[i];
				xstate_comp_sizes[i] = xstate_sizes[i];
			}
		}
		return;
	}

	xstate_comp_offsets[2] = FXSAVE_SIZE + XSAVE_HDR_SIZE;

605
	for (i = 2; i < xfeatures_nr; i++) {
606
		if (test_bit(i, (unsigned long *)&xfeatures_mask))
607 608 609 610 611 612 613 614 615 616 617
			xstate_comp_sizes[i] = xstate_sizes[i];
		else
			xstate_comp_sizes[i] = 0;

		if (i > 2)
			xstate_comp_offsets[i] = xstate_comp_offsets[i-1]
					+ xstate_comp_sizes[i-1];

	}
}

618 619 620
/*
 * setup the xstate image representing the init state
 */
621
static void setup_init_fpu_buf(void)
622
{
623 624 625 626 627 628
	static int on_boot_cpu = 1;

	if (!on_boot_cpu)
		return;
	on_boot_cpu = 0;

629 630 631 632
	if (!cpu_has_xsave)
		return;

	setup_xstate_features();
633
	print_xstate_features();
634

635
	if (cpu_has_xsaves) {
636 637
		init_xstate_ctx.header.xcomp_bv = (u64)1 << 63 | xfeatures_mask;
		init_xstate_ctx.header.xfeatures = xfeatures_mask;
638 639
	}

640 641 642
	/*
	 * Init all the features state with header_bv being 0x0
	 */
643
	xrstor_state_booting(&init_xstate_ctx, -1);
644

645 646 647 648
	/*
	 * Dump the init state again. This is to identify the init state
	 * of any feature which is not represented by all zero's.
	 */
649
	xsave_state_booting(&init_xstate_ctx);
650 651
}

F
Fenghua Yu 已提交
652
/*
653
 * Calculate total size of enabled xstates in XCR0/xfeatures_mask.
F
Fenghua Yu 已提交
654 655 656 657 658 659 660 661 662 663 664 665 666 667
 */
static void __init init_xstate_size(void)
{
	unsigned int eax, ebx, ecx, edx;
	int i;

	if (!cpu_has_xsaves) {
		cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
		xstate_size = ebx;
		return;
	}

	xstate_size = FXSAVE_SIZE + XSAVE_HDR_SIZE;
	for (i = 2; i < 64; i++) {
668
		if (test_bit(i, (unsigned long *)&xfeatures_mask)) {
F
Fenghua Yu 已提交
669 670 671 672 673 674
			cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);
			xstate_size += eax;
		}
	}
}

675 676
/*
 * Enable and initialize the xsave feature.
677
 * Called once per system bootup.
678
 *
I
Ingo Molnar 已提交
679
 * ( Not marked __init because of false positive section warnings. )
680
 */
681
void fpu__init_system_xstate(void)
682 683
{
	unsigned int eax, ebx, ecx, edx;
684 685 686 687 688
	static bool on_boot_cpu = 1;

	if (!on_boot_cpu)
		return;
	on_boot_cpu = 0;
689

690 691 692 693 694
	if (!cpu_has_xsave) {
		pr_info("x86/fpu: Legacy x87 FPU detected.\n");
		return;
	}

695
	if (boot_cpu_data.cpuid_level < XSTATE_CPUID) {
696
		WARN(1, "x86/fpu: XSTATE_CPUID missing!\n");
697 698 699 700
		return;
	}

	cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
701
	xfeatures_mask = eax + ((u64)edx << 32);
702

703 704
	if ((xfeatures_mask & XSTATE_FPSSE) != XSTATE_FPSSE) {
		pr_err("x86/fpu: FP/SSE not present amongst the CPU's xstate features: 0x%llx.\n", xfeatures_mask);
705 706 707 708
		BUG();
	}

	/*
709
	 * Support only the state known to OS.
710
	 */
711
	xfeatures_mask = xfeatures_mask & XCNTXT_MASK;
712

713 714
	/* Enable xstate instructions to be able to continue with initialization: */
	fpu__init_cpu_xstate();
715 716 717 718

	/*
	 * Recompute the context size for enabled features
	 */
F
Fenghua Yu 已提交
719
	init_xstate_size();
720

721
	update_regset_xstate_info(xstate_size, xfeatures_mask);
722
	prepare_fx_sw_frame();
723
	setup_init_fpu_buf();
724

725
	pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is 0x%x bytes, using '%s' format.\n",
726
		xfeatures_mask,
727 728
		xstate_size,
		cpu_has_xsaves ? "compacted" : "standard");
729
}
730

731 732 733 734 735 736 737 738 739 740 741 742
/*
 * Restore minimal FPU state after suspend:
 */
void fpu__resume_cpu(void)
{
	/*
	 * Restore XCR0 on xsave capable CPUs:
	 */
	if (cpu_has_xsave)
		xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask);
}

743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759
/*
 * Given the xsave area and a state inside, this function returns the
 * address of the state.
 *
 * This is the API that is called to get xstate address in either
 * standard format or compacted format of xsave area.
 *
 * Inputs:
 *	xsave: base address of the xsave area;
 *	xstate: state which is defined in xsave.h (e.g. XSTATE_FP, XSTATE_SSE,
 *	etc.)
 * Output:
 *	address of the state in the xsave area.
 */
void *get_xsave_addr(struct xsave_struct *xsave, int xstate)
{
	int feature = fls64(xstate) - 1;
760
	if (!test_bit(feature, (unsigned long *)&xfeatures_mask))
761 762 763 764
		return NULL;

	return (void *)xsave + xstate_comp_offsets[feature];
}
P
Paolo Bonzini 已提交
765
EXPORT_SYMBOL_GPL(get_xsave_addr);