/*
 * xsave/xrstor support.
 *
 * Author: Suresh Siddha <suresh.b.siddha@intel.com>
 */
#include <linux/compat.h>
#include <linux/cpu.h>

#include <asm/fpu/api.h>
#include <asm/fpu/internal.h>
#include <asm/fpu/signal.h>
#include <asm/sigframe.h>
#include <asm/tlbflush.h>

/*
 * Human-readable names for the xstate feature bits, indexed by bit
 * position in xfeatures_mask. The last entry is the fallback used by
 * cpu_has_xfeatures() for any bit beyond this table.
 */
static const char *xfeature_names[] =
{
	"x87 floating point registers"	,
	"SSE registers"			,
	"AVX registers"			,
	"MPX bounds registers"		,
	"MPX CSR"			,
	"AVX-512 opmask"		,
	"AVX-512 Hi256"			,
	"AVX-512 ZMM_Hi256"		,
	"unknown xstate feature"	,
};

27
/*
28
 * Mask of xstate features supported by the CPU and the kernel:
29
 */
30
u64 xfeatures_mask __read_mostly;
31

32 33 34
/*
 * Represents init state for the supported extended state.
 */
35
struct xsave_struct init_xstate_ctx;
36

37
static struct _fpx_sw_bytes fx_sw_reserved, fx_sw_reserved_ia32;
38
static unsigned int xstate_offsets[XFEATURES_NR_MAX], xstate_sizes[XFEATURES_NR_MAX];
39
static unsigned int xstate_comp_offsets[sizeof(xfeatures_mask)*8];
40 41 42

/* The number of supported xfeatures in xfeatures_mask: */
static unsigned int xfeatures_nr;
43

44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81
/*
 * Return whether the system supports a given xfeature.
 *
 * Also return the name of the (most advanced) feature that the caller requested:
 */
int cpu_has_xfeatures(u64 xfeatures_needed, const char **feature_name)
{
	u64 xfeatures_missing = xfeatures_needed & ~xfeatures_mask;

	if (unlikely(feature_name)) {
		long xfeature_idx, max_idx;
		u64 xfeatures_print;
		/*
		 * So we use FLS here to be able to print the most advanced
		 * feature that was requested but is missing. So if a driver
		 * asks about "XSTATE_SSE | XSTATE_YMM" we'll print the
		 * missing AVX feature - this is the most informative message
		 * to users:
		 */
		if (xfeatures_missing)
			xfeatures_print = xfeatures_missing;
		else
			xfeatures_print = xfeatures_needed;

		xfeature_idx = fls64(xfeatures_print)-1;
		max_idx = ARRAY_SIZE(xfeature_names)-1;
		xfeature_idx = min(xfeature_idx, max_idx);

		*feature_name = xfeature_names[xfeature_idx];
	}

	if (xfeatures_missing)
		return 0;

	return 1;
}
EXPORT_SYMBOL_GPL(cpu_has_xfeatures);

82
/*
83 84 85 86 87 88 89 90 91 92 93 94
 * When executing XSAVEOPT (optimized XSAVE), if a processor implementation
 * detects that an FPU state component is still (or is again) in its
 * initialized state, it may clear the corresponding bit in the header.xfeatures
 * field, and can skip the writeout of registers to the corresponding memory layout.
 *
 * This means that when the bit is zero, the state component might still contain
 * some previous - non-initialized register state.
 *
 * Before writing xstate information to user-space we sanitize those components,
 * to always ensure that the memory layout of a feature will be in the init state
 * if the corresponding header bit is zero. This is to ensure that user-space doesn't
 * see some stale state in the memory layout during signal handling, debugging etc.
95
 */
96
void fpstate_sanitize_xstate(struct fpu *fpu)
97
{
98
	struct i387_fxsave_struct *fx = &fpu->state.fxsave;
99
	int feature_bit;
100
	u64 xfeatures;
101

102
	if (!use_xsaveopt())
103 104
		return;

105
	xfeatures = fpu->state.xsave.header.xfeatures;
106 107 108

	/*
	 * None of the feature bits are in init state. So nothing else
L
Lucas De Marchi 已提交
109
	 * to do for us, as the memory layout is up to date.
110
	 */
111
	if ((xfeatures & xfeatures_mask) == xfeatures_mask)
112 113 114 115 116
		return;

	/*
	 * FP is in init state
	 */
117
	if (!(xfeatures & XSTATE_FP)) {
118 119 120 121 122 123 124 125 126 127 128 129
		fx->cwd = 0x37f;
		fx->swd = 0;
		fx->twd = 0;
		fx->fop = 0;
		fx->rip = 0;
		fx->rdp = 0;
		memset(&fx->st_space[0], 0, 128);
	}

	/*
	 * SSE is in init state
	 */
130
	if (!(xfeatures & XSTATE_SSE))
131 132
		memset(&fx->xmm_space[0], 0, 256);

133 134 135 136 137
	/*
	 * First two features are FPU and SSE, which above we handled
	 * in a special way already:
	 */
	feature_bit = 0x2;
138
	xfeatures = (xfeatures_mask & ~xfeatures) >> 2;
139 140

	/*
141 142 143
	 * Update all the remaining memory layouts according to their
	 * standard xstate layout, if their header bit is in the init
	 * state:
144
	 */
145 146
	while (xfeatures) {
		if (xfeatures & 0x1) {
147 148 149
			int offset = xstate_offsets[feature_bit];
			int size = xstate_sizes[feature_bit];

150
			memcpy((void *)fx + offset,
151
			       (void *)&init_xstate_ctx + offset,
152 153 154
			       size);
		}

155
		xfeatures >>= 1;
156 157 158 159
		feature_bit++;
	}
}

160 161 162 163
/*
 * Check for the presence of extended state information in the
 * user fpstate pointer in the sigcontext.
 */
164 165 166
static inline int check_for_xstate(struct i387_fxsave_struct __user *buf,
				   void __user *fpstate,
				   struct _fpx_sw_bytes *fx_sw)
167 168
{
	int min_xstate_size = sizeof(struct i387_fxsave_struct) +
169
			      sizeof(struct xstate_header);
170 171
	unsigned int magic2;

172 173
	if (__copy_from_user(fx_sw, &buf->sw_reserved[0], sizeof(*fx_sw)))
		return -1;
174

175 176 177 178 179 180
	/* Check for the first magic field and other error scenarios. */
	if (fx_sw->magic1 != FP_XSTATE_MAGIC1 ||
	    fx_sw->xstate_size < min_xstate_size ||
	    fx_sw->xstate_size > xstate_size ||
	    fx_sw->xstate_size > fx_sw->extended_size)
		return -1;
181 182 183 184 185 186 187

	/*
	 * Check for the presence of second magic word at the end of memory
	 * layout. This detects the case where the user just copied the legacy
	 * fpstate layout with out copying the extended state information
	 * in the memory layout.
	 */
188 189 190
	if (__get_user(magic2, (__u32 __user *)(fpstate + fx_sw->xstate_size))
	    || magic2 != FP_XSTATE_MAGIC2)
		return -1;
191 192 193 194

	return 0;
}

195 196 197
/*
 * Signal frame handlers.
 */
198 199 200
static inline int save_fsave_header(struct task_struct *tsk, void __user *buf)
{
	if (use_fxsr()) {
201
		struct xsave_struct *xsave = &tsk->thread.fpu.state.xsave;
202 203
		struct user_i387_ia32_struct env;
		struct _fpstate_ia32 __user *fp = buf;
204

205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221
		convert_from_fxsr(&env, tsk);

		if (__copy_to_user(buf, &env, sizeof(env)) ||
		    __put_user(xsave->i387.swd, &fp->status) ||
		    __put_user(X86_FXSR_MAGIC, &fp->magic))
			return -1;
	} else {
		struct i387_fsave_struct __user *fp = buf;
		u32 swd;
		if (__get_user(swd, &fp->swd) || __put_user(swd, &fp->status))
			return -1;
	}

	return 0;
}

/*
 * Fill in the software-reserved area and the trailing magic word of an
 * [f]xsave sigframe, after the register image itself has been written.
 * Returns 0 on success, non-zero on a failed user-space access.
 */
static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
{
	struct xsave_struct __user *x = buf;
	struct _fpx_sw_bytes *sw_bytes;
	u32 xfeatures;
	int err;

	/* Setup the bytes not touched by the [f]xsave and reserved for SW. */
	sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
	err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));

	if (!use_xsave())
		return err;

	err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));

	/*
	 * Read the xfeatures which we copied (directly from the cpu or
	 * from the state in task struct) to the user buffers.
	 */
	err |= __get_user(xfeatures, (__u32 *)&x->header.xfeatures);

	/*
	 * For legacy compatible, we always set FP/SSE bits in the bit
	 * vector while saving the state to the user context. This will
	 * enable us capturing any changes(during sigreturn) to
	 * the FP/SSE bits by the legacy applications which don't touch
	 * xfeatures in the xsave header.
	 *
	 * xsave aware apps can change the xfeatures in the xsave
	 * header as well as change any contents in the memory layout.
	 * xrestore as part of sigreturn will capture all the changes.
	 */
	xfeatures |= XSTATE_FPSSE;

	err |= __put_user(xfeatures, (__u32 *)&x->header.xfeatures);

	return err;
}

261
static inline int copy_fpregs_to_sigframe(struct xsave_struct __user *buf)
262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296
{
	int err;

	if (use_xsave())
		err = xsave_user(buf);
	else if (use_fxsr())
		err = fxsave_user((struct i387_fxsave_struct __user *) buf);
	else
		err = fsave_user((struct i387_fsave_struct __user *) buf);

	if (unlikely(err) && __clear_user(buf, xstate_size))
		err = -EFAULT;
	return err;
}

/*
 * Save the fpu, extended register state to the user signal frame.
 *
 * 'buf_fx' is the 64-byte aligned pointer at which the [f|fx|x]save
 *  state is copied.
 *  'buf' points to the 'buf_fx' or to the fsave header followed by 'buf_fx'.
 *
 *	buf == buf_fx for 64-bit frames and 32-bit fsave frame.
 *	buf != buf_fx for 32-bit frames with fxstate.
 *
 * If the fpu, extended register state is live, save the state directly
 * to the user frame pointed by the aligned pointer 'buf_fx'. Otherwise,
 * copy the thread's fpu state to the user frame starting at 'buf_fx'.
 *
 * If this is a 32-bit frame with fxstate, put a fsave header before
 * the aligned state at 'buf_fx'.
 *
 * For [f]xsave state, update the SW reserved fields in the [f]xsave frame
 * indicating the absence/presence of the extended state to the user.
 */
297
int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
298
{
299
	struct xsave_struct *xsave = &current->thread.fpu.state.xsave;
300 301 302 303 304 305 306 307 308
	struct task_struct *tsk = current;
	int ia32_fxstate = (buf != buf_fx);

	ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
			 config_enabled(CONFIG_IA32_EMULATION));

	if (!access_ok(VERIFY_WRITE, buf, size))
		return -EACCES;

309
	if (!static_cpu_has(X86_FEATURE_FPU))
310 311 312 313
		return fpregs_soft_get(current, NULL, 0,
			sizeof(struct user_i387_ia32_struct), NULL,
			(struct _fpstate_ia32 __user *) buf) ? -1 : 1;

314
	if (fpregs_active()) {
315
		/* Save the live register state to the user directly. */
316
		if (copy_fpregs_to_sigframe(buf_fx))
317 318 319 320
			return -1;
		/* Update the thread's fxstate to save the fsave header. */
		if (ia32_fxstate)
			fpu_fxsave(&tsk->thread.fpu);
321
	} else {
322
		fpstate_sanitize_xstate(&tsk->thread.fpu);
323
		if (__copy_to_user(buf_fx, xsave, xstate_size))
324 325
			return -1;
	}
326

327 328 329
	/* Save the fsave header for the 32-bit frames. */
	if ((ia32_fxstate || !use_fxsr()) && save_fsave_header(tsk, buf))
		return -1;
330

331 332 333 334 335
	if (use_fxsr() && save_xstate_epilog(buf_fx, ia32_fxstate))
		return -1;

	return 0;
}
336

337 338 339
static inline void
sanitize_restored_xstate(struct task_struct *tsk,
			 struct user_i387_ia32_struct *ia32_env,
340
			 u64 xfeatures, int fx_only)
341
{
342
	struct xsave_struct *xsave = &tsk->thread.fpu.state.xsave;
343
	struct xstate_header *header = &xsave->header;
344

345 346
	if (use_xsave()) {
		/* These bits must be zero. */
347
		memset(header->reserved, 0, 48);
348 349

		/*
350 351
		 * Init the state that is not present in the memory
		 * layout and not enabled by the OS.
352
		 */
353
		if (fx_only)
354
			header->xfeatures = XSTATE_FPSSE;
355
		else
356
			header->xfeatures &= (xfeatures_mask & xfeatures);
357
	}
358

359
	if (use_fxsr()) {
360
		/*
361 362
		 * mscsr reserved bits must be masked to zero for security
		 * reasons.
363
		 */
364
		xsave->i387.mxcsr &= mxcsr_feature_mask;
365

366
		convert_to_fxsr(tsk, ia32_env);
367
	}
368 369
}

370
/*
371
 * Restore the extended state if present. Otherwise, restore the FP/SSE state.
372
 */
373
static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
374
{
375 376
	if (use_xsave()) {
		if ((unsigned long)buf % 64 || fx_only) {
377
			u64 init_bv = xfeatures_mask & ~XSTATE_FPSSE;
378
			xrstor_state(&init_xstate_ctx, init_bv);
379
			return fxrstor_user(buf);
380
		} else {
381
			u64 init_bv = xfeatures_mask & ~xbv;
382
			if (unlikely(init_bv))
383
				xrstor_state(&init_xstate_ctx, init_bv);
384 385 386
			return xrestore_user(buf, xbv);
		}
	} else if (use_fxsr()) {
387
		return fxrstor_user(buf);
388
	} else
389
		return frstor_user(buf);
390 391
}

392
static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
393
{
394
	int ia32_fxstate = (buf != buf_fx);
395
	struct task_struct *tsk = current;
396
	struct fpu *fpu = &tsk->thread.fpu;
397
	int state_size = xstate_size;
398
	u64 xfeatures = 0;
399 400 401 402
	int fx_only = 0;

	ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
			 config_enabled(CONFIG_IA32_EMULATION));
403 404

	if (!buf) {
405
		fpu__clear(fpu);
406
		return 0;
407 408 409 410 411
	}

	if (!access_ok(VERIFY_READ, buf, size))
		return -EACCES;

412
	fpu__activate_curr(fpu);
413

414
	if (!static_cpu_has(X86_FEATURE_FPU))
415 416 417
		return fpregs_soft_set(current, NULL,
				       0, sizeof(struct user_i387_ia32_struct),
				       NULL, buf) != 0;
418

419 420 421 422 423 424 425 426 427 428 429 430
	if (use_xsave()) {
		struct _fpx_sw_bytes fx_sw_user;
		if (unlikely(check_for_xstate(buf_fx, buf_fx, &fx_sw_user))) {
			/*
			 * Couldn't find the extended state information in the
			 * memory layout. Restore just the FP/SSE and init all
			 * the other extended state.
			 */
			state_size = sizeof(struct i387_fxsave_struct);
			fx_only = 1;
		} else {
			state_size = fx_sw_user.xstate_size;
431
			xfeatures = fx_sw_user.xfeatures;
432 433 434 435 436 437 438 439 440
		}
	}

	if (ia32_fxstate) {
		/*
		 * For 32-bit frames with fxstate, copy the user state to the
		 * thread's fpu state, reconstruct fxstate from the fsave
		 * header. Sanitize the copied state etc.
		 */
441
		struct fpu *fpu = &tsk->thread.fpu;
442
		struct user_i387_ia32_struct env;
443
		int err = 0;
444

445
		/*
446
		 * Drop the current fpu which clears fpu->fpstate_active. This ensures
447 448 449 450
		 * that any context-switch during the copy of the new state,
		 * avoids the intermediate state from getting restored/saved.
		 * Thus avoiding the new restored state from getting corrupted.
		 * We will be ready to restore/save the state only after
451
		 * fpu->fpstate_active is again set.
452
		 */
453
		fpu__drop(fpu);
454

455
		if (__copy_from_user(&fpu->state.xsave, buf_fx, state_size) ||
456
		    __copy_from_user(&env, buf, sizeof(env))) {
457
			fpstate_init(fpu);
458 459
			err = -1;
		} else {
460
			sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
461
		}
462

463
		fpu->fpstate_active = 1;
464 465
		if (use_eager_fpu()) {
			preempt_disable();
466
			fpu__restore();
467 468
			preempt_enable();
		}
469 470

		return err;
471
	} else {
472
		/*
473 474
		 * For 64-bit frames and 32-bit fsave frames, restore the user
		 * state to the registers directly (with exceptions handled).
475
		 */
476
		user_fpu_begin();
477
		if (restore_user_xstate(buf_fx, xfeatures, fx_only)) {
478
			fpu__clear(fpu);
479 480
			return -1;
		}
481
	}
482 483

	return 0;
484 485
}

486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522
static inline int xstate_sigframe_size(void)
{
	return use_xsave() ? xstate_size + FP_XSTATE_MAGIC2_SIZE : xstate_size;
}

/*
 * Restore FPU state from a sigframe:
 */
int fpu__restore_sig(void __user *buf, int ia32_frame)
{
	int size = xstate_sigframe_size();
	void __user *buf_fx = buf;

	/*
	 * 32-bit fxstate frames carry an fsave header in front of the
	 * 64-byte aligned [f]xsave image:
	 */
	if (ia32_frame && use_fxsr()) {
		size += sizeof(struct i387_fsave_struct);
		buf_fx = buf + sizeof(struct i387_fsave_struct);
	}

	return __fpu__restore_sig(buf, buf_fx, size);
}

/*
 * Carve out space for the FPU image below 'sp' on the user stack:
 * returns the new (lower) stack pointer, sets *buf_fx to the 64-byte
 * aligned [f]xsave area and *size to the total frame size.
 */
unsigned long
fpu__alloc_mathframe(unsigned long sp, int ia32_frame,
		     unsigned long *buf_fx, unsigned long *size)
{
	unsigned long frame_size = xstate_sigframe_size();

	sp = round_down(sp - frame_size, 64);
	*buf_fx = sp;

	/* 32-bit fxstate frames additionally need room for an fsave header: */
	if (ia32_frame && use_fxsr()) {
		frame_size += sizeof(struct i387_fsave_struct);
		sp -= sizeof(struct i387_fsave_struct);
	}

	*size = frame_size;

	return sp;
}
523 524 525 526 527 528 529
/*
 * Prepare the SW reserved portion of the fxsave memory layout, indicating
 * the presence of the extended state information in the memory layout
 * pointed by the fpstate pointer in the sigcontext.
 * This will be saved when ever the FP and extended state context is
 * saved on the user stack during the signal handler delivery to the user.
 */
R
roel kluin 已提交
530
static void prepare_fx_sw_frame(void)
531
{
532 533
	int fsave_header_size = sizeof(struct i387_fsave_struct);
	int size = xstate_size + FP_XSTATE_MAGIC2_SIZE;
534

535 536
	if (config_enabled(CONFIG_X86_32))
		size += fsave_header_size;
537 538

	fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
539
	fx_sw_reserved.extended_size = size;
540
	fx_sw_reserved.xfeatures = xfeatures_mask;
541 542
	fx_sw_reserved.xstate_size = xstate_size;

543 544 545 546 547
	if (config_enabled(CONFIG_IA32_EMULATION)) {
		fx_sw_reserved_ia32 = fx_sw_reserved;
		fx_sw_reserved_ia32.extended_size += fsave_header_size;
	}
}
548

549
/*
550 551
 * Enable the extended processor state save/restore feature.
 * Called once per CPU onlining.
552
 */
553
void fpu__init_cpu_xstate(void)
554
{
555
	if (!cpu_has_xsave || !xfeatures_mask)
556 557
		return;

A
Andy Lutomirski 已提交
558
	cr4_set_bits(X86_CR4_OSXSAVE);
559
	xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask);
560 561
}

562 563 564 565
/*
 * Record the offsets and sizes of different state managed by the xsave
 * memory layout.
 */
566
static void __init setup_xstate_features(void)
567 568 569
{
	int eax, ebx, ecx, edx, leaf = 0x2;

570
	xfeatures_nr = fls64(xfeatures_mask);
571 572

	do {
573
		cpuid_count(XSTATE_CPUID, leaf, &eax, &ebx, &ecx, &edx);
574 575 576 577 578 579 580 581 582 583 584

		if (eax == 0)
			break;

		xstate_offsets[leaf] = ebx;
		xstate_sizes[leaf] = eax;

		leaf++;
	} while (1);
}

585
static void print_xstate_feature(u64 xstate_mask)
586
{
587
	const char *feature_name;
588

589 590
	if (cpu_has_xfeatures(xstate_mask, &feature_name))
		pr_info("x86/fpu: Supporting XSAVE feature 0x%02Lx: '%s'\n", xstate_mask, feature_name);
591 592 593 594 595 596 597
}

/*
 * Print out all the supported xstate features:
 */
static void print_xstate_features(void)
{
598 599 600 601 602 603 604 605
	print_xstate_feature(XSTATE_FP);
	print_xstate_feature(XSTATE_SSE);
	print_xstate_feature(XSTATE_YMM);
	print_xstate_feature(XSTATE_BNDREGS);
	print_xstate_feature(XSTATE_BNDCSR);
	print_xstate_feature(XSTATE_OPMASK);
	print_xstate_feature(XSTATE_ZMM_Hi256);
	print_xstate_feature(XSTATE_Hi16_ZMM);
606 607
}

608 609 610 611 612 613 614 615 616 617
/*
 * This function sets up offsets and sizes of all extended states in
 * xsave area. This supports both standard format and compacted format
 * of the xsave aread.
 *
 * Input: void
 * Output: void
 */
void setup_xstate_comp(void)
{
618
	unsigned int xstate_comp_sizes[sizeof(xfeatures_mask)*8];
619 620
	int i;

621 622 623 624 625 626 627
	/*
	 * The FP xstates and SSE xstates are legacy states. They are always
	 * in the fixed offsets in the xsave area in either compacted form
	 * or standard form.
	 */
	xstate_comp_offsets[0] = 0;
	xstate_comp_offsets[1] = offsetof(struct i387_fxsave_struct, xmm_space);
628 629

	if (!cpu_has_xsaves) {
630
		for (i = 2; i < xfeatures_nr; i++) {
631
			if (test_bit(i, (unsigned long *)&xfeatures_mask)) {
632 633 634 635 636 637 638 639 640
				xstate_comp_offsets[i] = xstate_offsets[i];
				xstate_comp_sizes[i] = xstate_sizes[i];
			}
		}
		return;
	}

	xstate_comp_offsets[2] = FXSAVE_SIZE + XSAVE_HDR_SIZE;

641
	for (i = 2; i < xfeatures_nr; i++) {
642
		if (test_bit(i, (unsigned long *)&xfeatures_mask))
643 644 645 646 647 648 649 650 651 652 653
			xstate_comp_sizes[i] = xstate_sizes[i];
		else
			xstate_comp_sizes[i] = 0;

		if (i > 2)
			xstate_comp_offsets[i] = xstate_comp_offsets[i-1]
					+ xstate_comp_sizes[i-1];

	}
}

654 655 656
/*
 * setup the xstate image representing the init state
 */
657
static void setup_init_fpu_buf(void)
658
{
659 660 661 662 663 664
	static int on_boot_cpu = 1;

	if (!on_boot_cpu)
		return;
	on_boot_cpu = 0;

665 666 667 668
	if (!cpu_has_xsave)
		return;

	setup_xstate_features();
669
	print_xstate_features();
670

671
	if (cpu_has_xsaves) {
672 673
		init_xstate_ctx.header.xcomp_bv = (u64)1 << 63 | xfeatures_mask;
		init_xstate_ctx.header.xfeatures = xfeatures_mask;
674 675
	}

676 677 678
	/*
	 * Init all the features state with header_bv being 0x0
	 */
679
	xrstor_state_booting(&init_xstate_ctx, -1);
680

681 682 683 684
	/*
	 * Dump the init state again. This is to identify the init state
	 * of any feature which is not represented by all zero's.
	 */
685
	xsave_state_booting(&init_xstate_ctx);
686 687
}

F
Fenghua Yu 已提交
688
/*
689
 * Calculate total size of enabled xstates in XCR0/xfeatures_mask.
F
Fenghua Yu 已提交
690 691 692 693 694 695 696 697 698 699 700 701 702 703
 */
static void __init init_xstate_size(void)
{
	unsigned int eax, ebx, ecx, edx;
	int i;

	if (!cpu_has_xsaves) {
		cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
		xstate_size = ebx;
		return;
	}

	xstate_size = FXSAVE_SIZE + XSAVE_HDR_SIZE;
	for (i = 2; i < 64; i++) {
704
		if (test_bit(i, (unsigned long *)&xfeatures_mask)) {
F
Fenghua Yu 已提交
705 706 707 708 709 710
			cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);
			xstate_size += eax;
		}
	}
}

711 712
/*
 * Enable and initialize the xsave feature.
713
 * Called once per system bootup.
714
 *
I
Ingo Molnar 已提交
715
 * ( Not marked __init because of false positive section warnings. )
716
 */
717
void fpu__init_system_xstate(void)
718 719
{
	unsigned int eax, ebx, ecx, edx;
720 721 722 723 724
	static bool on_boot_cpu = 1;

	if (!on_boot_cpu)
		return;
	on_boot_cpu = 0;
725

726 727 728 729 730
	if (!cpu_has_xsave) {
		pr_info("x86/fpu: Legacy x87 FPU detected.\n");
		return;
	}

731
	if (boot_cpu_data.cpuid_level < XSTATE_CPUID) {
732
		WARN(1, "x86/fpu: XSTATE_CPUID missing!\n");
733 734 735 736
		return;
	}

	cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
737
	xfeatures_mask = eax + ((u64)edx << 32);
738

739 740
	if ((xfeatures_mask & XSTATE_FPSSE) != XSTATE_FPSSE) {
		pr_err("x86/fpu: FP/SSE not present amongst the CPU's xstate features: 0x%llx.\n", xfeatures_mask);
741 742 743 744
		BUG();
	}

	/*
745
	 * Support only the state known to OS.
746
	 */
747
	xfeatures_mask = xfeatures_mask & XCNTXT_MASK;
748

749 750
	/* Enable xstate instructions to be able to continue with initialization: */
	fpu__init_cpu_xstate();
751 752 753 754

	/*
	 * Recompute the context size for enabled features
	 */
F
Fenghua Yu 已提交
755
	init_xstate_size();
756

757
	update_regset_xstate_info(xstate_size, xfeatures_mask);
758
	prepare_fx_sw_frame();
759
	setup_init_fpu_buf();
760

761
	pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is 0x%x bytes, using '%s' format.\n",
762
		xfeatures_mask,
763 764
		xstate_size,
		cpu_has_xsaves ? "compacted" : "standard");
765
}
766

767 768 769 770 771 772 773 774 775 776 777 778
/*
 * Restore minimal FPU state after suspend:
 */
void fpu__resume_cpu(void)
{
	/*
	 * Restore XCR0 on xsave capable CPUs:
	 */
	if (cpu_has_xsave)
		xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask);
}

779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795
/*
 * Given the xsave area and a state inside, this function returns the
 * address of the state.
 *
 * This is the API that is called to get xstate address in either
 * standard format or compacted format of xsave area.
 *
 * Inputs:
 *	xsave: base address of the xsave area;
 *	xstate: state which is defined in xsave.h (e.g. XSTATE_FP, XSTATE_SSE,
 *	etc.)
 * Output:
 *	address of the state in the xsave area.
 */
void *get_xsave_addr(struct xsave_struct *xsave, int xstate)
{
	int feature = fls64(xstate) - 1;
796
	if (!test_bit(feature, (unsigned long *)&xfeatures_mask))
797 798 799 800
		return NULL;

	return (void *)xsave + xstate_comp_offsets[feature];
}
P
Paolo Bonzini 已提交
801
EXPORT_SYMBOL_GPL(get_xsave_addr);