/*
 *  Copyright (C) 1994 Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <asm/fpu/internal.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/signal.h>
#include <asm/traps.h>

#include <linux/hardirq.h>

/*
 * Represents the initial FPU state. It's mostly (but not completely) zeroes,
 * depending on the FPU hardware format:
 */
union thread_xstate init_fpstate __read_mostly;

/*
 * Track whether the kernel is using the FPU state
 * currently.
 *
 * This flag is used:
 *
 *   - by IRQ context code to potentially use the FPU
 *     if it's unused.
 *
 *   - to debug kernel_fpu_begin()/end() correctness
 */
static DEFINE_PER_CPU(bool, in_kernel_fpu);

/*
 * Track which context is using the FPU on the CPU:
 */
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

static void kernel_fpu_disable(void)
{
	WARN_ON(this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, true);
}

static void kernel_fpu_enable(void)
{
	WARN_ON_ONCE(!this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, false);
}

static bool kernel_fpu_disabled(void)
{
	return this_cpu_read(in_kernel_fpu);
}

/*
 * Were we in an interrupt that interrupted kernel mode?
 *
 * We can do a kernel_fpu_begin/end() pair *ONLY* if that
 * pair does nothing at all: the thread must not have active
 * FPU registers (so that we don't try to save the FPU state),
 * and TS must be set (so that the clts/stts pair does nothing
 * that is visible in the interrupted kernel thread).
 *
 * Except for the eagerfpu case, where we return true: there the
 * thread likely has active FPU registers, but we are not going to
 * set/clear TS anyway.
 */
static bool interrupted_kernel_fpu_idle(void)
{
	if (kernel_fpu_disabled())
		return false;

	if (use_eager_fpu())
		return true;

	return !current->thread.fpu.fpregs_active && (read_cr0() & X86_CR0_TS);
}

/*
 * Were we in user mode (or vm86 mode) when we were
 * interrupted?
 *
 * Doing kernel_fpu_begin/end() is ok if we are running
 * in an interrupt context from user mode - we'll just
 * save the FPU state as required.
 */
static bool interrupted_user_mode(void)
{
	struct pt_regs *regs = get_irq_regs();
	return regs && user_mode(regs);
}

/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 *
 * It's always ok in process context (ie "not interrupt")
 * but it is sometimes ok even from an irq.
 */
bool irq_fpu_usable(void)
{
	return !in_interrupt() ||
		interrupted_user_mode() ||
		interrupted_kernel_fpu_idle();
}
EXPORT_SYMBOL(irq_fpu_usable);
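
/*
 * A sketch of the intended calling pattern for code that may run in IRQ
 * context (the caller and its fallback path are illustrative, not part
 * of this file):
 *
 *	if (irq_fpu_usable()) {
 *		kernel_fpu_begin();
 *		... SSE/AVX-accelerated code ...
 *		kernel_fpu_end();
 *	} else {
 *		... integer-only fallback ...
 *	}
 */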

void __kernel_fpu_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	kernel_fpu_disable();

	if (fpu->fpregs_active) {
		copy_fpregs_to_fpstate(fpu);
	} else {
		this_cpu_write(fpu_fpregs_owner_ctx, NULL);
		__fpregs_activate_hw();
	}
}
EXPORT_SYMBOL(__kernel_fpu_begin);

void __kernel_fpu_end(void)
{
	struct fpu *fpu = &current->thread.fpu;

	if (fpu->fpregs_active) {
		if (WARN_ON(copy_fpstate_to_fpregs(fpu)))
			fpu__clear(fpu);
	} else {
		__fpregs_deactivate_hw();
	}

	kernel_fpu_enable();
}
EXPORT_SYMBOL(__kernel_fpu_end);

void kernel_fpu_begin(void)
{
	preempt_disable();
	WARN_ON_ONCE(!irq_fpu_usable());
	__kernel_fpu_begin();
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin);

void kernel_fpu_end(void)
{
	__kernel_fpu_end();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);
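
/*
 * Note: preemption stays disabled between kernel_fpu_begin() and
 * kernel_fpu_end(), so the FPU-using region should be short and must
 * not sleep or schedule.
 */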

/*
 * CR0::TS save/restore functions:
 */
int irq_ts_save(void)
{
	/*
	 * If we are in process context and not atomic, we can take a spurious
	 * DNA fault. Otherwise, doing clts() in process context requires
	 * disabling preemption or some heavy lifting like kernel_fpu_begin().
	 */
	if (!in_atomic())
		return 0;

	if (read_cr0() & X86_CR0_TS) {
		clts();
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(irq_ts_save);

void irq_ts_restore(int TS_state)
{
	if (TS_state)
		stts();
}
EXPORT_SYMBOL_GPL(irq_ts_restore);
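
/*
 * A sketch of the intended save/restore pattern (the caller code is
 * illustrative, not from this file):
 *
 *	int ts_state = irq_ts_save();
 *	... touch FPU state without scheduling ...
 *	irq_ts_restore(ts_state);
 */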

/*
 * Save the FPU state (mark it for reload if necessary):
 *
 * This only ever gets called for the current task.
 */
void fpu__save(struct fpu *fpu)
{
	WARN_ON(fpu != &current->thread.fpu);

	preempt_disable();
	if (fpu->fpregs_active) {
		if (!copy_fpregs_to_fpstate(fpu))
			fpregs_deactivate(fpu);
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(fpu__save);

/*
 * Legacy x87 fpstate init:
 */
static inline void fpstate_init_fstate(struct i387_fsave_struct *fp)
{
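	/*
	 * These are the classic FNINIT reset values: control word 0x037f
	 * (all x87 exceptions masked), status word 0 and tag word 0xffff
	 * (all registers empty). The unused high 16 bits of each 32-bit
	 * fsave slot are conventionally set to all-ones:
	 */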
	fp->cwd = 0xffff037fu;
	fp->swd = 0xffff0000u;
	fp->twd = 0xffffffffu;
	fp->fos = 0xffff0000u;
}

void fpstate_init(union thread_xstate *state)
{
	if (!cpu_has_fpu) {
		fpstate_init_soft(&state->soft);
		return;
	}

	memset(state, 0, xstate_size);

	if (cpu_has_fxsr)
		fpstate_init_fxstate(&state->fxsave);
	else
		fpstate_init_fstate(&state->fsave);
}
EXPORT_SYMBOL_GPL(fpstate_init);

/*
 * Copy the current task's FPU state to a new task's FPU context.
 *
 * In the 'eager' case we just save to the destination context.
 *
 * In the 'lazy' case we save to the source context, mark the FPU lazy
 * via stts() and copy the source context into the destination context.
 */
static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
	WARN_ON(src_fpu != &current->thread.fpu);

	/*
	 * Don't let 'init optimized' areas of the XSAVE area
	 * leak into the child task:
	 */
	if (use_eager_fpu())
		memset(&dst_fpu->state.xsave, 0, xstate_size);

	/*
	 * Save current FPU registers directly into the child
	 * FPU context, without any memory-to-memory copying.
	 *
	 * If the FPU context got destroyed in the process (FNSAVE
	 * done on old CPUs) then copy it back into the source
	 * context and mark the current task for lazy restore.
	 *
	 * We have to do all this with preemption disabled,
	 * mostly because of the FNSAVE case, because in that
	 * case we must not allow preemption in the window
	 * between the FNSAVE and us marking the context lazy.
	 *
	 * It shouldn't be an issue as even FNSAVE is plenty
	 * fast in terms of critical section length.
	 */
	preempt_disable();
	if (!copy_fpregs_to_fpstate(dst_fpu)) {
		memcpy(&src_fpu->state, &dst_fpu->state, xstate_size);
		fpregs_deactivate(src_fpu);
	}
	preempt_enable();
}

int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
	dst_fpu->counter = 0;
	dst_fpu->fpregs_active = 0;
	dst_fpu->last_cpu = -1;

	if (src_fpu->fpstate_active)
		fpu_copy(dst_fpu, src_fpu);

	return 0;
}

/*
 * Activate the current task's in-memory FPU context,
 * if it has not been used before:
 */
void fpu__activate_curr(struct fpu *fpu)
{
	WARN_ON_ONCE(fpu != &current->thread.fpu);

	if (!fpu->fpstate_active) {
		fpstate_init(&fpu->state);

		/* Safe to do for the current task: */
		fpu->fpstate_active = 1;
	}
}
EXPORT_SYMBOL_GPL(fpu__activate_curr);

/*
 * This function must be called before we modify a stopped child's
 * fpstate.
 *
 * If the child has not used the FPU before then initialize its
 * fpstate.
 *
 * If the child has used the FPU before then unlazy it.
 *
 * [ After this function call, after registers in the fpstate are
 *   modified and the child task has woken up, the child task will
 *   restore the modified FPU state from the modified context. If we
 *   didn't clear its lazy status here then the lazy in-registers
 *   state pending on its former CPU could be restored, corrupting
 *   the modifications. ]
 *
 * This function is also called before we read a stopped child's
 * FPU state - to make sure it's initialized if the child has
 * no active FPU state.
 *
 * TODO: A future optimization would be to skip the unlazying in
 *       the read-only case, it's not strictly necessary for
 *       read-only access to the context.
 */
static void fpu__activate_stopped(struct fpu *child_fpu)
{
	WARN_ON_ONCE(child_fpu == &current->thread.fpu);

	if (child_fpu->fpstate_active) {
		child_fpu->last_cpu = -1;
	} else {
		fpstate_init(&child_fpu->state);

		/* Safe to do for stopped child tasks: */
		child_fpu->fpstate_active = 1;
	}
}

/*
 * 'fpu__restore()' is called to copy FPU registers from
 * the FPU fpstate to the live hw registers and to activate
 * access to the hardware registers, so that FPU instructions
 * can be used afterwards.
 *
 * Must be called with kernel preemption disabled (for example
 * with local interrupts disabled, as it is in the case of
 * do_device_not_available()).
 */
void fpu__restore(void)
{
	struct task_struct *tsk = current;
	struct fpu *fpu = &tsk->thread.fpu;

	fpu__activate_curr(fpu);

	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
	kernel_fpu_disable();
	fpregs_activate(fpu);
	if (unlikely(copy_fpstate_to_fpregs(fpu))) {
		fpu__clear(fpu);
		force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
	} else {
		tsk->thread.fpu.counter++;
	}
	kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(fpu__restore);

/*
 * Drops current FPU state: deactivates the fpregs and
 * the fpstate. NOTE: it still leaves previous contents
 * in the fpregs in the eager-FPU case.
 *
 * This function can be used in cases where we know that
 * a state-restore is coming: either an explicit one,
 * or a reschedule.
 */
void fpu__drop(struct fpu *fpu)
{
	preempt_disable();
	fpu->counter = 0;

	if (fpu->fpregs_active) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		fpregs_deactivate(fpu);
	}

	fpu->fpstate_active = 0;

	preempt_enable();
}

/*
 * Clear FPU registers by setting them up from
 * the init fpstate:
 */
static inline void copy_init_fpstate_to_fpregs(void)
{
	if (use_xsave())
		copy_kernel_to_xregs(&init_fpstate.xsave, -1);
	else
		copy_kernel_to_fxregs(&init_fpstate.fxsave);
}

/*
 * Clear the FPU state back to init state.
 *
 * Called by sys_execve(), by the signal handler code and by various
 * error paths.
 */
void fpu__clear(struct fpu *fpu)
{
	WARN_ON_ONCE(fpu != &current->thread.fpu); /* Almost certainly an anomaly */

	if (!use_eager_fpu()) {
		/* FPU state will be reallocated lazily at the first use. */
		fpu__drop(fpu);
	} else {
		if (!fpu->fpstate_active) {
			fpu__activate_curr(fpu);
			user_fpu_begin();
		}
		copy_init_fpstate_to_fpregs();
	}
}

/*
 * The xstateregs_active() routine is the same as the regset_fpregs_active()
 * routine, as the "regset->n" for the xstate regset will be updated based
 * on the feature capabilities supported by the xsave.
 */
int regset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	struct fpu *target_fpu = &target->thread.fpu;

	return target_fpu->fpstate_active ? regset->n : 0;
}
int regset_xregset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	struct fpu *target_fpu = &target->thread.fpu;

	return (cpu_has_fxsr && target_fpu->fpstate_active) ? regset->n : 0;
}
int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;

	if (!cpu_has_fxsr)
		return -ENODEV;

	fpu__activate_stopped(fpu);
	fpstate_sanitize_xstate(fpu);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &fpu->state.fxsave, 0, -1);
}

int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	int ret;

	if (!cpu_has_fxsr)
		return -ENODEV;

	fpu__activate_stopped(fpu);
	fpstate_sanitize_xstate(fpu);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &fpu->state.fxsave, 0, -1);

	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	fpu->state.fxsave.mxcsr &= mxcsr_feature_mask;

	/*
	 * update the header bits in the xsave header, indicating the
	 * presence of FP and SSE state.
	 */
	if (cpu_has_xsave)
		fpu->state.xsave.header.xfeatures |= XSTATE_FPSSE;

	return ret;
}

int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct xsave_struct *xsave;
	int ret;

	if (!cpu_has_xsave)
		return -ENODEV;

	fpu__activate_stopped(fpu);

	xsave = &fpu->state.xsave;

	/*
	 * Copy the 48 bytes defined by software into the xstate memory
	 * layout in the thread struct first, so that we can copy the
	 * entire xstateregs to the user using one user_regset_copyout().
	 */
	memcpy(&xsave->i387.sw_reserved,
		xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));
	/*
	 * Copy the xstate memory layout.
	 */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
	return ret;
}

int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct xsave_struct *xsave;
	int ret;

	if (!cpu_has_xsave)
		return -ENODEV;

	fpu__activate_stopped(fpu);

	xsave = &fpu->state.xsave;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	xsave->i387.mxcsr &= mxcsr_feature_mask;
	xsave->header.xfeatures &= xfeatures_mask;
	/*
	 * These bits must be zero.
	 */
	memset(&xsave->header.reserved, 0, 48);

	return ret;
}

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION

/*
 * FPU tag word conversions.
 */
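
/*
 * Orientation for the helpers below: the i387 tag word uses two bits
 * per register (00 valid, 01 zero, 10 special, 11 empty), while the
 * FXSR format compresses this to one bit per register (1 == non-empty,
 * 0 == empty). Worked example: an i387 tag word of 0xffff (all
 * registers empty) maps to an FXSR tag byte of 0x00, and 0x0000
 * (all registers valid) maps to 0xff.
 */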

static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
{
	unsigned int tmp; /* to avoid 16 bit prefixes in the code */

	/* Transform each pair of bits into 01 (valid) or 00 (empty) */
	tmp = ~twd;
	tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
	/* and move the valid bits to the lower byte. */
	tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
	tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
	tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */

	return tmp;
}

#define FPREG_ADDR(f, n)	((void *)&(f)->st_space + (n) * 16)
#define FP_EXP_TAG_VALID	0
#define FP_EXP_TAG_ZERO		1
#define FP_EXP_TAG_SPECIAL	2
#define FP_EXP_TAG_EMPTY	3

static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
{
	struct _fpxreg *st;
	u32 tos = (fxsave->swd >> 11) & 7;
	u32 twd = (unsigned long) fxsave->twd;
	u32 tag;
	u32 ret = 0xffff0000u;
	int i;

	for (i = 0; i < 8; i++, twd >>= 1) {
		if (twd & 0x1) {
			st = FPREG_ADDR(fxsave, (i - tos) & 7);

			switch (st->exponent & 0x7fff) {
			case 0x7fff:
				tag = FP_EXP_TAG_SPECIAL;
				break;
			case 0x0000:
				if (!st->significand[0] &&
				    !st->significand[1] &&
				    !st->significand[2] &&
				    !st->significand[3])
					tag = FP_EXP_TAG_ZERO;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			default:
				if (st->significand[3] & 0x8000)
					tag = FP_EXP_TAG_VALID;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			}
		} else {
			tag = FP_EXP_TAG_EMPTY;
		}
		ret |= tag << (2 * i);
	}
	return ret;
}

/*
 * FXSR floating point environment conversions.
 */

void
convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
{
	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state.fxsave;
	struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	env->cwd = fxsave->cwd | 0xffff0000u;
	env->swd = fxsave->swd | 0xffff0000u;
	env->twd = twd_fxsr_to_i387(fxsave);

#ifdef CONFIG_X86_64
	env->fip = fxsave->rip;
	env->foo = fxsave->rdp;
	/*
	 * These should actually be ds/cs at FPU exception time, but
	 * that information is not available in 64-bit mode.
	 */
	env->fcs = task_pt_regs(tsk)->cs;
	if (tsk == current) {
		savesegment(ds, env->fos);
	} else {
		env->fos = tsk->thread.ds;
	}
	env->fos |= 0xffff0000;
#else
	env->fip = fxsave->fip;
	env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16);
	env->foo = fxsave->foo;
	env->fos = fxsave->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(to[0]));
}

void convert_to_fxsr(struct task_struct *tsk,
		     const struct user_i387_ia32_struct *env)
{
	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state.fxsave;
	struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	fxsave->cwd = env->cwd;
	fxsave->swd = env->swd;
	fxsave->twd = twd_i387_to_fxsr(env->twd);
	fxsave->fop = (u16) ((u32) env->fcs >> 16);
#ifdef CONFIG_X86_64
	fxsave->rip = env->fip;
	fxsave->rdp = env->foo;
	/* cs and ds ignored */
#else
	fxsave->fip = env->fip;
	fxsave->fcs = (env->fcs & 0xffff);
	fxsave->foo = env->foo;
	fxsave->fos = env->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(from[0]));
}

int fpregs_get(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       void *kbuf, void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct user_i387_ia32_struct env;

	fpu__activate_stopped(fpu);

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr)
		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					   &fpu->state.fsave, 0,
					   -1);

	fpstate_sanitize_xstate(fpu);

	if (kbuf && pos == 0 && count == sizeof(env)) {
		convert_from_fxsr(kbuf, target);
		return 0;
	}

	convert_from_fxsr(&env, target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
}

int fpregs_set(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct user_i387_ia32_struct env;
	int ret;

	fpu__activate_stopped(fpu);
	fpstate_sanitize_xstate(fpu);

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr)
		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					  &fpu->state.fsave, 0,
					  -1);

	if (pos > 0 || count < sizeof(env))
		convert_from_fxsr(&env, target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
	if (!ret)
		convert_to_fxsr(target, &env);

	/*
	 * update the header bit in the xsave header, indicating the
	 * presence of FP.
	 */
	if (cpu_has_xsave)
		fpu->state.xsave.header.xfeatures |= XSTATE_FP;
	return ret;
}

/*
 * FPU state for core dumps.
 * This is only used for a.out dumps now.
 * It is declared generically using elf_fpregset_t (which is
 * struct user_i387_struct) but is in fact only used for 32-bit
 * dumps, so on 64-bit it is really struct user_i387_ia32_struct.
 */
int dump_fpu(struct pt_regs *regs, struct user_i387_struct *ufpu)
{
	struct task_struct *tsk = current;
	struct fpu *fpu = &tsk->thread.fpu;
	int fpvalid;

	fpvalid = fpu->fpstate_active;
	if (fpvalid)
		fpvalid = !fpregs_get(tsk, NULL,
				      0, sizeof(struct user_i387_ia32_struct),
				      ufpu, NULL);

	return fpvalid;
}
EXPORT_SYMBOL(dump_fpu);
#endif	/* CONFIG_X86_32 || CONFIG_IA32_EMULATION */

/*
 * x87 math exception handling:
 */

static inline unsigned short get_fpu_cwd(struct fpu *fpu)
{
	if (cpu_has_fxsr) {
		return fpu->state.fxsave.cwd;
	} else {
		return (unsigned short)fpu->state.fsave.cwd;
	}
}

static inline unsigned short get_fpu_swd(struct fpu *fpu)
{
	if (cpu_has_fxsr) {
		return fpu->state.fxsave.swd;
	} else {
		return (unsigned short)fpu->state.fsave.swd;
	}
}

static inline unsigned short get_fpu_mxcsr(struct fpu *fpu)
{
	if (cpu_has_xmm) {
		return fpu->state.fxsave.mxcsr;
	} else {
		return MXCSR_DEFAULT;
	}
}

int fpu__exception_code(struct fpu *fpu, int trap_nr)
{
	int err;

	if (trap_nr == X86_TRAP_MF) {
		unsigned short cwd, swd;
		/*
		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
		 * status.  0x3f is the exception bits in these regs, 0x200 is the
		 * C1 reg you need in case of a stack fault, 0x040 is the stack
		 * fault bit.  We should only be taking one exception at a time,
		 * so if this combination doesn't produce any single exception,
		 * then we have a bad program that isn't synchronizing its FPU usage
		 * and it will suffer the consequences since we won't be able to
		 * fully reproduce the context of the exception
		 */
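		/*
		 * Worked example (for orientation): an unmasked
		 * divide-by-zero sets ZE (bit 2) in swd while ZM (bit 2)
		 * in cwd is clear, so err & 0x004 below is set and we
		 * return FPE_FLTDIV.
		 */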
		cwd = get_fpu_cwd(fpu);
		swd = get_fpu_swd(fpu);

		err = swd & ~cwd;
	} else {
		/*
		 * The SIMD FPU exceptions are handled a little differently, as there
		 * is only a single status/control register.  Thus, to determine which
		 * unmasked exception was caught we must mask the exception mask bits
		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
		 */
		unsigned short mxcsr = get_fpu_mxcsr(fpu);
		err = ~(mxcsr >> 7) & mxcsr;
	}

	if (err & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		return FPE_FLTINV;
	} else if (err & 0x004) { /* Divide by Zero */
		return FPE_FLTDIV;
	} else if (err & 0x008) { /* Overflow */
		return FPE_FLTOVF;
	} else if (err & 0x012) { /* Denormal, Underflow */
		return FPE_FLTUND;
	} else if (err & 0x020) { /* Precision */
		return FPE_FLTRES;
	}

	/*
	 * If we're using IRQ 13, or supposedly even some trap
	 * X86_TRAP_MF implementations, it's possible
	 * we get a spurious trap, which is not an error.
	 */
	return 0;
}