/*
 * xsave/xrstor support.
 *
 * Author: Suresh Siddha <suresh.b.siddha@intel.com>
 */
#include <linux/compat.h>
#include <linux/cpu.h>

#include <asm/fpu/api.h>
#include <asm/fpu/internal.h>
#include <asm/sigframe.h>
#include <asm/tlbflush.h>

/*
 * Human-readable names for the xstate feature bits, indexed by bit
 * position. The final entry is a catch-all used for any feature bit
 * beyond the ones the kernel knows by name.
 */
static const char *xfeature_names[] =
{
	"x87 floating point registers"	,
	"SSE registers"			,
	"AVX registers"			,
	"MPX bounds registers"		,
	"MPX CSR"			,
	"AVX-512 opmask"		,
	"AVX-512 Hi256"			,
	"AVX-512 ZMM_Hi256"		,
	"unknown xstate feature"	,
};

/*
 * Mask of xstate features supported by the CPU and the kernel:
 */
u64 xfeatures_mask __read_mostly;

/*
 * Represents init state for the supported extended state.
 */
struct xsave_struct init_xstate_ctx;

/* SW-reserved sigframe descriptors for 64-bit and ia32 frames: */
static struct _fpx_sw_bytes fx_sw_reserved, fx_sw_reserved_ia32;

/* Per-feature offset/size in the standard (uncompacted) xsave layout: */
static unsigned int xstate_offsets[XFEATURES_NR_MAX], xstate_sizes[XFEATURES_NR_MAX];

/* Per-feature offsets used by get_xsave_addr(); filled by setup_xstate_comp(): */
static unsigned int xstate_comp_offsets[sizeof(xfeatures_mask)*8];

/* The number of supported xfeatures in xfeatures_mask: */
static unsigned int xfeatures_nr;

43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80
/*
 * Return whether the system supports a given xfeature.
 *
 * Also return the name of the (most advanced) feature that the caller requested:
 */
int cpu_has_xfeatures(u64 xfeatures_needed, const char **feature_name)
{
	u64 xfeatures_missing = xfeatures_needed & ~xfeatures_mask;

	if (unlikely(feature_name)) {
		long xfeature_idx, max_idx;
		u64 xfeatures_print;
		/*
		 * So we use FLS here to be able to print the most advanced
		 * feature that was requested but is missing. So if a driver
		 * asks about "XSTATE_SSE | XSTATE_YMM" we'll print the
		 * missing AVX feature - this is the most informative message
		 * to users:
		 */
		if (xfeatures_missing)
			xfeatures_print = xfeatures_missing;
		else
			xfeatures_print = xfeatures_needed;

		xfeature_idx = fls64(xfeatures_print)-1;
		max_idx = ARRAY_SIZE(xfeature_names)-1;
		xfeature_idx = min(xfeature_idx, max_idx);

		*feature_name = xfeature_names[xfeature_idx];
	}

	if (xfeatures_missing)
		return 0;

	return 1;
}
EXPORT_SYMBOL_GPL(cpu_has_xfeatures);

81
/*
 * When executing XSAVEOPT (optimized XSAVE), if a processor implementation
 * detects that an FPU state component is still (or is again) in its
 * initialized state, it may clear the corresponding bit in the header.xfeatures
 * field, and can skip the writeout of registers to the corresponding memory layout.
 *
 * This means that when the bit is zero, the state component might still contain
 * some previous - non-initialized register state.
 *
 * Before writing xstate information to user-space we sanitize those components,
 * to always ensure that the memory layout of a feature will be in the init state
 * if the corresponding header bit is zero. This is to ensure that user-space doesn't
 * see some stale state in the memory layout during signal handling, debugging etc.
 */
void fpstate_sanitize_xstate(struct fpu *fpu)
{
	struct i387_fxsave_struct *fx = &fpu->state.fxsave;
	int feature_bit;
	u64 xfeatures;

	/* Only XSAVEOPT can leave init-state components unwritten: */
	if (!use_xsaveopt())
		return;

	xfeatures = fpu->state.xsave.header.xfeatures;

	/*
	 * None of the feature bits are in init state. So nothing else
	 * to do for us, as the memory layout is up to date.
	 */
	if ((xfeatures & xfeatures_mask) == xfeatures_mask)
		return;

	/*
	 * FP is in init state
	 */
	if (!(xfeatures & XSTATE_FP)) {
		fx->cwd = 0x37f;	/* x87 init control word (FINIT value) */
		fx->swd = 0;
		fx->twd = 0;
		fx->fop = 0;
		fx->rip = 0;
		fx->rdp = 0;
		memset(&fx->st_space[0], 0, 128);	/* clear the st() registers */
	}

	/*
	 * SSE is in init state
	 */
	if (!(xfeatures & XSTATE_SSE))
		memset(&fx->xmm_space[0], 0, 256);	/* clear the XMM registers */

	/*
	 * First two features are FPU and SSE, which above we handled
	 * in a special way already:
	 */
	feature_bit = 0x2;
	/* Bits of enabled features whose header bit indicates init state: */
	xfeatures = (xfeatures_mask & ~xfeatures) >> 2;

	/*
	 * Update all the remaining memory layouts according to their
	 * standard xstate layout, if their header bit is in the init
	 * state:
	 */
	while (xfeatures) {
		if (xfeatures & 0x1) {
			int offset = xstate_offsets[feature_bit];
			int size = xstate_sizes[feature_bit];

			/* Overwrite the stale data with the feature's init image: */
			memcpy((void *)fx + offset,
			       (void *)&init_xstate_ctx + offset,
			       size);
		}

		xfeatures >>= 1;
		feature_bit++;
	}
}

/*
 * Check for the presence of extended state information in the
 * user fpstate pointer in the sigcontext.
 *
 * Returns 0 if a valid extended-state frame is described by the
 * sw_reserved bytes, -1 on any fault or validation failure.
 */
static inline int check_for_xstate(struct i387_fxsave_struct __user *buf,
				   void __user *fpstate,
				   struct _fpx_sw_bytes *fx_sw)
{
	/* Minimum plausible size: legacy fxsave image plus the xsave header: */
	int min_xstate_size = sizeof(struct i387_fxsave_struct) +
			      sizeof(struct xstate_header);
	unsigned int magic2;

	if (__copy_from_user(fx_sw, &buf->sw_reserved[0], sizeof(*fx_sw)))
		return -1;

	/* Check for the first magic field and other error scenarios. */
	if (fx_sw->magic1 != FP_XSTATE_MAGIC1 ||
	    fx_sw->xstate_size < min_xstate_size ||
	    fx_sw->xstate_size > xstate_size ||
	    fx_sw->xstate_size > fx_sw->extended_size)
		return -1;

	/*
	 * Check for the presence of second magic word at the end of memory
	 * layout. This detects the case where the user just copied the legacy
	 * fpstate layout without copying the extended state information
	 * in the memory layout.
	 */
	if (__get_user(magic2, (__u32 __user *)(fpstate + fx_sw->xstate_size))
	    || magic2 != FP_XSTATE_MAGIC2)
		return -1;

	return 0;
}

/*
 * Signal frame handlers.
 */

/*
 * Write the legacy i387 fsave header portion of the signal frame to
 * user space at 'buf'.
 *
 * Returns 0 on success, -1 if any user-space access faults.
 */
static inline int save_fsave_header(struct task_struct *tsk, void __user *buf)
{
	if (use_fxsr()) {
		struct xsave_struct *xsave = &tsk->thread.fpu.state.xsave;
		struct user_i387_ia32_struct env;
		struct _fpstate_ia32 __user *fp = buf;

		/* Convert the in-kernel fxsave image to the legacy i387 layout: */
		convert_from_fxsr(&env, tsk);

		if (__copy_to_user(buf, &env, sizeof(env)) ||
		    __put_user(xsave->i387.swd, &fp->status) ||
		    __put_user(X86_FXSR_MAGIC, &fp->magic))
			return -1;
	} else {
		struct i387_fsave_struct __user *fp = buf;
		u32 swd;

		/* Mirror the saved status word into the frame's 'status' field: */
		if (__get_user(swd, &fp->swd) || __put_user(swd, &fp->status))
			return -1;
	}

	return 0;
}

/*
 * Finish an [f]xsave signal frame: fill in the sw_reserved layout
 * description and, for xsave frames, the trailing FP_XSTATE_MAGIC2
 * word plus the user-visible xfeatures bits.
 *
 * Returns 0 on success, non-zero if any user-space access faults.
 */
static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
{
	struct xsave_struct __user *x = buf;
	struct _fpx_sw_bytes *sw_bytes;
	u32 xfeatures;
	int err;

	/* Setup the bytes not touched by the [f]xsave and reserved for SW. */
	sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
	err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));

	if (!use_xsave())
		return err;

	/* Second magic word marks the end of the extended state area: */
	err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));

	/*
	 * Read the xfeatures which we copied (directly from the cpu or
	 * from the state in task struct) to the user buffers.
	 */
	err |= __get_user(xfeatures, (__u32 *)&x->header.xfeatures);

	/*
	 * For legacy compatible, we always set FP/SSE bits in the bit
	 * vector while saving the state to the user context. This will
	 * enable us capturing any changes(during sigreturn) to
	 * the FP/SSE bits by the legacy applications which don't touch
	 * xfeatures in the xsave header.
	 *
	 * xsave aware apps can change the xfeatures in the xsave
	 * header as well as change any contents in the memory layout.
	 * xrestore as part of sigreturn will capture all the changes.
	 */
	xfeatures |= XSTATE_FPSSE;

	err |= __put_user(xfeatures, (__u32 *)&x->header.xfeatures);

	return err;
}

260
static inline int copy_fpregs_to_sigframe(struct xsave_struct __user *buf)
261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295
{
	int err;

	if (use_xsave())
		err = xsave_user(buf);
	else if (use_fxsr())
		err = fxsave_user((struct i387_fxsave_struct __user *) buf);
	else
		err = fsave_user((struct i387_fsave_struct __user *) buf);

	if (unlikely(err) && __clear_user(buf, xstate_size))
		err = -EFAULT;
	return err;
}

/*
 * Save the fpu, extended register state to the user signal frame.
 *
 * 'buf_fx' is the 64-byte aligned pointer at which the [f|fx|x]save
 *  state is copied.
 *  'buf' points to the 'buf_fx' or to the fsave header followed by 'buf_fx'.
 *
 *	buf == buf_fx for 64-bit frames and 32-bit fsave frame.
 *	buf != buf_fx for 32-bit frames with fxstate.
 *
 * If the fpu, extended register state is live, save the state directly
 * to the user frame pointed by the aligned pointer 'buf_fx'. Otherwise,
 * copy the thread's fpu state to the user frame starting at 'buf_fx'.
 *
 * If this is a 32-bit frame with fxstate, put a fsave header before
 * the aligned state at 'buf_fx'.
 *
 * For [f]xsave state, update the SW reserved fields in the [f]xsave frame
 * indicating the absence/presence of the extended state to the user.
 */
int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
{
	struct xsave_struct *xsave = &current->thread.fpu.state.xsave;
	struct task_struct *tsk = current;
	int ia32_fxstate = (buf != buf_fx);

	/* 32-bit fxstate frames only exist on 32-bit or IA32-emulation kernels: */
	ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
			 config_enabled(CONFIG_IA32_EMULATION));

	if (!access_ok(VERIFY_WRITE, buf, size))
		return -EACCES;

	/* No hardware FPU: let the soft-FPU code produce the frame: */
	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_get(current, NULL, 0,
			sizeof(struct user_i387_ia32_struct), NULL,
			(struct _fpstate_ia32 __user *) buf) ? -1 : 1;

	if (user_has_fpu()) {
		/* Save the live register state to the user directly. */
		if (copy_fpregs_to_sigframe(buf_fx))
			return -1;
		/* Update the thread's fxstate to save the fsave header. */
		if (ia32_fxstate)
			fpu_fxsave(&tsk->thread.fpu);
	} else {
		/* Registers not live: sanitize and copy the saved thread state. */
		fpstate_sanitize_xstate(&tsk->thread.fpu);
		if (__copy_to_user(buf_fx, xsave, xstate_size))
			return -1;
	}

	/* Save the fsave header for the 32-bit frames. */
	if ((ia32_fxstate || !use_fxsr()) && save_fsave_header(tsk, buf))
		return -1;

	if (use_fxsr() && save_xstate_epilog(buf_fx, ia32_fxstate))
		return -1;

	return 0;
}

/*
 * Sanitize state just copied in from a (possibly hostile) user signal
 * frame before it can be loaded into the task's fpstate.
 */
static inline void
sanitize_restored_xstate(struct task_struct *tsk,
			 struct user_i387_ia32_struct *ia32_env,
			 u64 xfeatures, int fx_only)
{
	struct xsave_struct *xsave = &tsk->thread.fpu.state.xsave;
	struct xstate_header *header = &xsave->header;

	if (use_xsave()) {
		/* These bits must be zero. */
		memset(header->reserved, 0, 48);

		/*
		 * Init the state that is not present in the memory
		 * layout and not enabled by the OS.
		 */
		if (fx_only)
			header->xfeatures = XSTATE_FPSSE;
		else
			header->xfeatures &= (xfeatures_mask & xfeatures);
	}

	if (use_fxsr()) {
		/*
		 * mxcsr reserved bits must be masked to zero for security
		 * reasons.
		 */
		xsave->i387.mxcsr &= mxcsr_feature_mask;

		/* Merge the 32-bit env (fsave header) back into the fxsave image: */
		convert_to_fxsr(tsk, ia32_env);
	}
}

/*
 * Restore the extended state if present. Otherwise, restore the FP/SSE state.
 *
 * 'xbv' is the xfeatures bitmap the signal frame claims to contain;
 * 'fx_only' forces a legacy FP/SSE-only restore.
 */
static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
{
	if (use_xsave()) {
		if ((unsigned long)buf % 64 || fx_only) {
			/*
			 * XRSTOR needs a 64-byte aligned buffer: fall back
			 * to FXRSTOR and init all non-FP/SSE states.
			 */
			u64 init_bv = xfeatures_mask & ~XSTATE_FPSSE;
			xrstor_state(&init_xstate_ctx, init_bv);
			return fxrstor_user(buf);
		} else {
			/* Init whatever states the frame does not carry: */
			u64 init_bv = xfeatures_mask & ~xbv;
			if (unlikely(init_bv))
				xrstor_state(&init_xstate_ctx, init_bv);
			return xrestore_user(buf, xbv);
		}
	} else if (use_fxsr()) {
		return fxrstor_user(buf);
	} else
		return frstor_user(buf);
}

/*
 * Restore FPU state from a user signal frame.
 *
 * 'buf' points to the start of the frame (possibly an fsave header
 * followed by the aligned state), 'buf_fx' to the 64-byte aligned
 * [f]xsave image; they are equal for 64-bit and 32-bit fsave frames.
 *
 * A NULL 'buf' resets the FPU state. Returns 0 on success.
 */
int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
{
	int ia32_fxstate = (buf != buf_fx);
	struct task_struct *tsk = current;
	struct fpu *fpu = &tsk->thread.fpu;
	int state_size = xstate_size;
	u64 xfeatures = 0;
	int fx_only = 0;

	/* 32-bit fxstate frames only exist on 32-bit or IA32-emulation kernels: */
	ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
			 config_enabled(CONFIG_IA32_EMULATION));

	if (!buf) {
		fpu_reset_state(fpu);
		return 0;
	}

	if (!access_ok(VERIFY_READ, buf, size))
		return -EACCES;

	fpu__activate_curr(fpu);

	/* No hardware FPU: hand the frame to the soft-FPU emulation code: */
	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_set(current, NULL,
				       0, sizeof(struct user_i387_ia32_struct),
				       NULL, buf) != 0;

	if (use_xsave()) {
		struct _fpx_sw_bytes fx_sw_user;
		if (unlikely(check_for_xstate(buf_fx, buf_fx, &fx_sw_user))) {
			/*
			 * Couldn't find the extended state information in the
			 * memory layout. Restore just the FP/SSE and init all
			 * the other extended state.
			 */
			state_size = sizeof(struct i387_fxsave_struct);
			fx_only = 1;
		} else {
			state_size = fx_sw_user.xstate_size;
			xfeatures = fx_sw_user.xfeatures;
		}
	}

	if (ia32_fxstate) {
		/*
		 * For 32-bit frames with fxstate, copy the user state to the
		 * thread's fpu state, reconstruct fxstate from the fsave
		 * header. Sanitize the copied state etc.
		 */
		struct fpu *fpu = &tsk->thread.fpu;
		struct user_i387_ia32_struct env;
		int err = 0;

		/*
		 * Drop the current fpu which clears fpu->fpstate_active. This ensures
		 * that any context-switch during the copy of the new state,
		 * avoids the intermediate state from getting restored/saved.
		 * Thus avoiding the new restored state from getting corrupted.
		 * We will be ready to restore/save the state only after
		 * fpu->fpstate_active is again set.
		 */
		drop_fpu(fpu);

		if (__copy_from_user(&fpu->state.xsave, buf_fx, state_size) ||
		    __copy_from_user(&env, buf, sizeof(env))) {
			/* Faulted mid-copy: fall back to a clean init state: */
			fpstate_init(fpu);
			err = -1;
		} else {
			sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
		}

		fpu->fpstate_active = 1;
		if (use_eager_fpu()) {
			preempt_disable();
			fpu__restore();
			preempt_enable();
		}

		return err;
	} else {
		/*
		 * For 64-bit frames and 32-bit fsave frames, restore the user
		 * state to the registers directly (with exceptions handled).
		 */
		user_fpu_begin();
		if (restore_user_xstate(buf_fx, xfeatures, fx_only)) {
			fpu_reset_state(fpu);
			return -1;
		}
	}

	return 0;
}

485 486 487 488 489 490 491
/*
 * Prepare the SW reserved portion of the fxsave memory layout, indicating
 * the presence of the extended state information in the memory layout
 * pointed by the fpstate pointer in the sigcontext.
 * This will be saved when ever the FP and extended state context is
 * saved on the user stack during the signal handler delivery to the user.
 */
R
roel kluin 已提交
492
static void prepare_fx_sw_frame(void)
493
{
494 495
	int fsave_header_size = sizeof(struct i387_fsave_struct);
	int size = xstate_size + FP_XSTATE_MAGIC2_SIZE;
496

497 498
	if (config_enabled(CONFIG_X86_32))
		size += fsave_header_size;
499 500

	fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
501
	fx_sw_reserved.extended_size = size;
502
	fx_sw_reserved.xfeatures = xfeatures_mask;
503 504
	fx_sw_reserved.xstate_size = xstate_size;

505 506 507 508 509
	if (config_enabled(CONFIG_IA32_EMULATION)) {
		fx_sw_reserved_ia32 = fx_sw_reserved;
		fx_sw_reserved_ia32.extended_size += fsave_header_size;
	}
}
510

511
/*
512 513
 * Enable the extended processor state save/restore feature.
 * Called once per CPU onlining.
514
 */
515
void fpu__init_cpu_xstate(void)
516
{
517
	if (!cpu_has_xsave || !xfeatures_mask)
518 519
		return;

A
Andy Lutomirski 已提交
520
	cr4_set_bits(X86_CR4_OSXSAVE);
521
	xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask);
522 523
}

/*
 * Record the offsets and sizes of different state managed by the xsave
 * memory layout.
 *
 * Enumerates CPUID leaf XSTATE_CPUID sub-leaves starting at 2 (0 and 1
 * are the legacy FP/SSE states with fixed offsets), stopping at the
 * first sub-leaf that reports a zero size.
 */
static void __init setup_xstate_features(void)
{
	int eax, ebx, ecx, edx, leaf = 0x2;

	/* Index of the highest enabled feature bit, plus one: */
	xfeatures_nr = fls64(xfeatures_mask);

	do {
		cpuid_count(XSTATE_CPUID, leaf, &eax, &ebx, &ecx, &edx);

		/* A zero size ends the enumeration: */
		if (eax == 0)
			break;

		xstate_offsets[leaf] = ebx;	/* offset in the standard layout */
		xstate_sizes[leaf] = eax;	/* size of this state component */

		leaf++;
	} while (1);
}

547
static void print_xstate_feature(u64 xstate_mask)
548
{
549
	const char *feature_name;
550

551 552
	if (cpu_has_xfeatures(xstate_mask, &feature_name))
		pr_info("x86/fpu: Supporting XSAVE feature 0x%02Lx: '%s'\n", xstate_mask, feature_name);
553 554 555 556 557 558 559
}

/*
 * Print out all the supported xstate features:
 */
static void print_xstate_features(void)
{
560 561 562 563 564 565 566 567
	print_xstate_feature(XSTATE_FP);
	print_xstate_feature(XSTATE_SSE);
	print_xstate_feature(XSTATE_YMM);
	print_xstate_feature(XSTATE_BNDREGS);
	print_xstate_feature(XSTATE_BNDCSR);
	print_xstate_feature(XSTATE_OPMASK);
	print_xstate_feature(XSTATE_ZMM_Hi256);
	print_xstate_feature(XSTATE_Hi16_ZMM);
568 569
}

/*
 * This function sets up offsets and sizes of all extended states in
 * xsave area. This supports both standard format and compacted format
 * of the xsave area.
 *
 * Input: void
 * Output: void
 */
void setup_xstate_comp(void)
{
	unsigned int xstate_comp_sizes[sizeof(xfeatures_mask)*8];
	int i;

	/*
	 * The FP xstates and SSE xstates are legacy states. They are always
	 * in the fixed offsets in the xsave area in either compacted form
	 * or standard form.
	 */
	xstate_comp_offsets[0] = 0;
	xstate_comp_offsets[1] = offsetof(struct i387_fxsave_struct, xmm_space);

	if (!cpu_has_xsaves) {
		/* Standard format: reuse the CPUID-enumerated offsets directly. */
		for (i = 2; i < xfeatures_nr; i++) {
			if (test_bit(i, (unsigned long *)&xfeatures_mask)) {
				xstate_comp_offsets[i] = xstate_offsets[i];
				xstate_comp_sizes[i] = xstate_sizes[i];
			}
		}
		return;
	}

	/* Compacted format: states are packed back-to-back after the header. */
	xstate_comp_offsets[2] = FXSAVE_SIZE + XSAVE_HDR_SIZE;

	for (i = 2; i < xfeatures_nr; i++) {
		if (test_bit(i, (unsigned long *)&xfeatures_mask))
			xstate_comp_sizes[i] = xstate_sizes[i];
		else
			xstate_comp_sizes[i] = 0;	/* disabled states take no room */

		/* Each state starts right after the previous one: */
		if (i > 2)
			xstate_comp_offsets[i] = xstate_comp_offsets[i-1]
					+ xstate_comp_sizes[i-1];

	}
}

/*
 * Setup the xstate image representing the init state.
 */
static void setup_init_fpu_buf(void)
{
	/* Run this only once, on the boot CPU: */
	static int on_boot_cpu = 1;

	if (!on_boot_cpu)
		return;
	on_boot_cpu = 0;

	if (!cpu_has_xsave)
		return;

	setup_xstate_features();
	print_xstate_features();

	if (cpu_has_xsaves) {
		/* Compacted format: bit 63 of xcomp_bv marks compaction: */
		init_xstate_ctx.header.xcomp_bv = (u64)1 << 63 | xfeatures_mask;
		init_xstate_ctx.header.xfeatures = xfeatures_mask;
	}

	/*
	 * Init all the features state with header_bv being 0x0
	 */
	xrstor_state_booting(&init_xstate_ctx, -1);

	/*
	 * Dump the init state again. This is to identify the init state
	 * of any feature which is not represented by all zero's.
	 */
	xsave_state_booting(&init_xstate_ctx);
}

F
Fenghua Yu 已提交
650
/*
651
 * Calculate total size of enabled xstates in XCR0/xfeatures_mask.
F
Fenghua Yu 已提交
652 653 654 655 656 657 658 659 660 661 662 663 664 665
 */
static void __init init_xstate_size(void)
{
	unsigned int eax, ebx, ecx, edx;
	int i;

	if (!cpu_has_xsaves) {
		cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
		xstate_size = ebx;
		return;
	}

	xstate_size = FXSAVE_SIZE + XSAVE_HDR_SIZE;
	for (i = 2; i < 64; i++) {
666
		if (test_bit(i, (unsigned long *)&xfeatures_mask)) {
F
Fenghua Yu 已提交
667 668 669 670 671 672
			cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);
			xstate_size += eax;
		}
	}
}

/*
 * Enable and initialize the xsave feature.
 * Called once per system bootup.
 *
 * ( Not marked __init because of false positive section warnings. )
 */
void fpu__init_system_xstate(void)
{
	unsigned int eax, ebx, ecx, edx;
	static bool on_boot_cpu = 1;

	/* Only the boot CPU performs the system-wide setup: */
	if (!on_boot_cpu)
		return;
	on_boot_cpu = 0;

	if (!cpu_has_xsave) {
		pr_info("x86/fpu: Legacy x87 FPU detected.\n");
		return;
	}

	if (boot_cpu_data.cpuid_level < XSTATE_CPUID) {
		WARN(1, "x86/fpu: XSTATE_CPUID missing!\n");
		return;
	}

	/* EDX:EAX of CPUID(XSTATE_CPUID, 0) is the supported-feature mask: */
	cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
	xfeatures_mask = eax + ((u64)edx << 32);

	if ((xfeatures_mask & XSTATE_FPSSE) != XSTATE_FPSSE) {
		pr_err("x86/fpu: FP/SSE not present amongst the CPU's xstate features: 0x%llx.\n", xfeatures_mask);
		BUG();
	}

	/*
	 * Support only the state known to OS.
	 */
	xfeatures_mask = xfeatures_mask & XCNTXT_MASK;

	/* Enable xstate instructions to be able to continue with initialization: */
	fpu__init_cpu_xstate();

	/*
	 * Recompute the context size for enabled features
	 */
	init_xstate_size();

	update_regset_xstate_info(xstate_size, xfeatures_mask);
	prepare_fx_sw_frame();
	setup_init_fpu_buf();

	pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is 0x%x bytes, using '%s' format.\n",
		xfeatures_mask,
		xstate_size,
		cpu_has_xsaves ? "compacted" : "standard");
}


729 730 731 732 733 734 735 736 737 738 739 740
/*
 * Restore minimal FPU state after suspend:
 */
void fpu__resume_cpu(void)
{
	/*
	 * Restore XCR0 on xsave capable CPUs:
	 */
	if (cpu_has_xsave)
		xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask);
}

741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757
/*
 * Given the xsave area and a state inside, this function returns the
 * address of the state.
 *
 * This is the API that is called to get xstate address in either
 * standard format or compacted format of xsave area.
 *
 * Inputs:
 *	xsave: base address of the xsave area;
 *	xstate: state which is defined in xsave.h (e.g. XSTATE_FP, XSTATE_SSE,
 *	etc.)
 * Output:
 *	address of the state in the xsave area.
 */
void *get_xsave_addr(struct xsave_struct *xsave, int xstate)
{
	int feature = fls64(xstate) - 1;
758
	if (!test_bit(feature, (unsigned long *)&xfeatures_mask))
759 760 761 762
		return NULL;

	return (void *)xsave + xstate_comp_offsets[feature];
}
P
Paolo Bonzini 已提交
763
EXPORT_SYMBOL_GPL(get_xsave_addr);