/*
 *  Copyright (C) 1994 Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <asm/fpu/internal.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/types.h>
#include <asm/traps.h>

#include <linux/hardirq.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/fpu.h>

/*
 * Represents the initial FPU state. It's mostly (but not completely) zeroes,
 * depending on the FPU hardware format:
 */
union fpregs_state init_fpstate __read_mostly;

/*
 * Track whether the kernel is using the FPU state
 * currently.
 *
 * This flag is used:
 *
 *   - by IRQ context code to potentially use the FPU
 *     if it's unused.
 *
 *   - to debug kernel_fpu_begin()/end() correctness
 */
static DEFINE_PER_CPU(bool, in_kernel_fpu);

/*
 * Track which context is using the FPU on the CPU:
 */
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

static void kernel_fpu_disable(void)
{
	WARN_ON_FPU(this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, true);
}

static void kernel_fpu_enable(void)
{
	WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, false);
}

static bool kernel_fpu_disabled(void)
{
	return this_cpu_read(in_kernel_fpu);
}

/*
 * Were we in an interrupt that interrupted kernel mode?
 *
 * In the eagerfpu case we always return true: the thread likely has
 * live FPU state, but we are not going to set/clear TS anyway.
 *
 * Otherwise (lazy FPU) we can do a kernel_fpu_begin/end() pair *ONLY*
 * if that pair does nothing at all: the thread must not have active
 * fpregs (so that we don't try to save the FPU state), and TS must
 * be set (so that the clts/stts pair does nothing that is visible
 * in the interrupted kernel thread).
 */
static bool interrupted_kernel_fpu_idle(void)
{
	if (kernel_fpu_disabled())
		return false;

	if (use_eager_fpu())
		return true;

	return !current->thread.fpu.fpregs_active && (read_cr0() & X86_CR0_TS);
}

/*
 * Were we in user mode (or vm86 mode) when we were
 * interrupted?
 *
 * Doing kernel_fpu_begin/end() is ok if we are running
 * in an interrupt context from user mode - we'll just
 * save the FPU state as required.
 */
static bool interrupted_user_mode(void)
{
	struct pt_regs *regs = get_irq_regs();

	return regs && user_mode(regs);
}

/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 *
 * It's always ok in process context (ie "not interrupt")
 * but it is sometimes ok even from an irq.
 */
bool irq_fpu_usable(void)
{
	return !in_interrupt() ||
		interrupted_user_mode() ||
		interrupted_kernel_fpu_idle();
}
EXPORT_SYMBOL(irq_fpu_usable);
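
/*
 * Illustrative usage sketch (not part of this file; my_irq_handler() is a
 * hypothetical driver function): interrupt-context code should check
 * irq_fpu_usable() before touching FPU state and fall back to a scalar
 * path when the FPU cannot be used:
 *
 *	static irqreturn_t my_irq_handler(int irq, void *dev_id)
 *	{
 *		if (irq_fpu_usable()) {
 *			kernel_fpu_begin();
 *			// SIMD fast path
 *			kernel_fpu_end();
 *		} else {
 *			// integer-only fallback
 *		}
 *		return IRQ_HANDLED;
 *	}
 */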

void __kernel_fpu_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	WARN_ON_FPU(!irq_fpu_usable());

	kernel_fpu_disable();

	if (fpu->fpregs_active) {
		/*
		 * Ignore return value -- we don't care if reg state
		 * is clobbered.
		 */
		copy_fpregs_to_fpstate(fpu);
	} else {
		this_cpu_write(fpu_fpregs_owner_ctx, NULL);
		__fpregs_activate_hw();
	}
}
EXPORT_SYMBOL(__kernel_fpu_begin);

void __kernel_fpu_end(void)
{
	struct fpu *fpu = &current->thread.fpu;

	if (fpu->fpregs_active)
		copy_kernel_to_fpregs(&fpu->state);
	else
		__fpregs_deactivate_hw();

	kernel_fpu_enable();
}
EXPORT_SYMBOL(__kernel_fpu_end);

void kernel_fpu_begin(void)
{
	preempt_disable();
	__kernel_fpu_begin();
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin);

void kernel_fpu_end(void)
{
	__kernel_fpu_end();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);
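
/*
 * Typical process-context usage sketch (illustrative; simd_xor() stands in
 * for any hypothetical SIMD helper): kernel_fpu_begin() disables preemption
 * and makes the FPU usable, kernel_fpu_end() re-enables preemption, and the
 * code in between must neither sleep nor take a page fault:
 *
 *	kernel_fpu_begin();
 *	simd_xor(dst, src, len);	// may clobber SSE/AVX registers
 *	kernel_fpu_end();
 */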

/*
 * CR0::TS save/restore functions:
 */
int irq_ts_save(void)
{
	/*
	 * If in process context and not atomic, we can take a spurious DNA fault.
	 * Otherwise, doing clts() in process context requires disabling preemption
	 * or some heavy lifting like kernel_fpu_begin()
	 */
	if (!in_atomic())
		return 0;

	if (read_cr0() & X86_CR0_TS) {
		clts();
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(irq_ts_save);

void irq_ts_restore(int TS_state)
{
	if (TS_state)
		stts();
}
EXPORT_SYMBOL_GPL(irq_ts_restore);
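
/*
 * Usage sketch (illustrative): a caller that issues FPU instructions from
 * a context where CR0.TS may be set brackets the access with this pair:
 *
 *	int ts_state = irq_ts_save();
 *	// ...FPU-touching instructions...
 *	irq_ts_restore(ts_state);
 */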

/*
 * Save the FPU state (mark it for reload if necessary):
 *
 * This only ever gets called for the current task.
 */
void fpu__save(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu);

	preempt_disable();
	trace_x86_fpu_before_save(fpu);
	if (fpu->fpregs_active) {
		if (!copy_fpregs_to_fpstate(fpu)) {
			if (use_eager_fpu())
				copy_kernel_to_fpregs(&fpu->state);
			else
				fpregs_deactivate(fpu);
		}
	}
	trace_x86_fpu_after_save(fpu);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(fpu__save);

/*
 * Legacy x87 fpstate state init:
 */
static inline void fpstate_init_fstate(struct fregs_state *fp)
{
	fp->cwd = 0xffff037fu;
	fp->swd = 0xffff0000u;
	fp->twd = 0xffffffffu;
	fp->fos = 0xffff0000u;
}

void fpstate_init(union fpregs_state *state)
{
	if (!static_cpu_has(X86_FEATURE_FPU)) {
		fpstate_init_soft(&state->soft);
		return;
	}

	memset(state, 0, fpu_kernel_xstate_size);

	/*
	 * XRSTORS requires that this bit is set in xcomp_bv, or
	 * it will #GP. Make sure it is replaced after the memset().
	 */
	if (static_cpu_has(X86_FEATURE_XSAVES))
		state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT;

	if (static_cpu_has(X86_FEATURE_FXSR))
		fpstate_init_fxstate(&state->fxsave);
	else
		fpstate_init_fstate(&state->fsave);
}
EXPORT_SYMBOL_GPL(fpstate_init);

int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
	dst_fpu->counter = 0;
	dst_fpu->fpregs_active = 0;
	dst_fpu->last_cpu = -1;

	if (!src_fpu->fpstate_active || !static_cpu_has(X86_FEATURE_FPU))
		return 0;

	WARN_ON_FPU(src_fpu != &current->thread.fpu);

	/*
	 * Don't let 'init optimized' areas of the XSAVE area
	 * leak into the child task:
	 */
	if (use_eager_fpu())
		memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);

	/*
	 * Save current FPU registers directly into the child
	 * FPU context, without any memory-to-memory copying.
	 * In lazy mode, if the FPU context isn't loaded into
	 * fpregs, CR0.TS will be set and do_device_not_available
	 * will load the FPU context.
	 *
	 * We have to do all this with preemption disabled,
	 * mostly because of the FNSAVE case, because in that
	 * case we must not allow preemption in the window
	 * between the FNSAVE and us marking the context lazy.
	 *
	 * It shouldn't be an issue as even FNSAVE is plenty
	 * fast in terms of critical section length.
	 */
	preempt_disable();
	if (!copy_fpregs_to_fpstate(dst_fpu)) {
		memcpy(&src_fpu->state, &dst_fpu->state,
		       fpu_kernel_xstate_size);

		if (use_eager_fpu())
			copy_kernel_to_fpregs(&src_fpu->state);
		else
			fpregs_deactivate(src_fpu);
	}
	preempt_enable();

	trace_x86_fpu_copy_src(src_fpu);
	trace_x86_fpu_copy_dst(dst_fpu);

	return 0;
}

/*
 * Activate the current task's in-memory FPU context,
 * if it has not been used before:
 */
void fpu__activate_curr(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu);

	if (!fpu->fpstate_active) {
		fpstate_init(&fpu->state);
		trace_x86_fpu_init_state(fpu);

		trace_x86_fpu_activate_state(fpu);
		/* Safe to do for the current task: */
		fpu->fpstate_active = 1;
	}
}
EXPORT_SYMBOL_GPL(fpu__activate_curr);

/*
 * This function must be called before we read a task's fpstate.
 *
 * If the task has not used the FPU before then initialize its
 * fpstate.
 *
 * If the task has used the FPU before then save it.
 */
void fpu__activate_fpstate_read(struct fpu *fpu)
{
	/*
	 * If fpregs are active (in the current CPU), then
	 * copy them to the fpstate:
	 */
	if (fpu->fpregs_active) {
		fpu__save(fpu);
	} else {
		if (!fpu->fpstate_active) {
			fpstate_init(&fpu->state);
			trace_x86_fpu_init_state(fpu);

			trace_x86_fpu_activate_state(fpu);
			/* Safe to do for current and for stopped child tasks: */
			fpu->fpstate_active = 1;
		}
	}
}
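
/*
 * Usage sketch (illustrative; loosely modeled on the regset/ptrace read
 * path, details assumed): a debugger reading a stopped child's FPU
 * registers syncs fpregs into the fpstate first, then copies from memory:
 *
 *	fpu__activate_fpstate_read(fpu);
 *	memcpy(buf, &fpu->state.fxsave, sizeof(fpu->state.fxsave));
 */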

/*
 * This function must be called before we write a task's fpstate.
 *
 * If the task has used the FPU before then unlazy it.
 * If the task has not used the FPU before then initialize its fpstate.
 *
 * After this function call, after registers in the fpstate are
 * modified and the child task has woken up, the child task will
 * restore the modified FPU state from the modified context. If we
 * didn't clear its lazy status here then the lazy in-registers
 * state pending on its former CPU could be restored, corrupting
 * the modifications.
 */
void fpu__activate_fpstate_write(struct fpu *fpu)
{
	/*
	 * Only stopped child tasks can be used to modify the FPU
	 * state in the fpstate buffer:
	 */
	WARN_ON_FPU(fpu == &current->thread.fpu);

	if (fpu->fpstate_active) {
		/* Invalidate any lazy state: */
		fpu->last_cpu = -1;
	} else {
		fpstate_init(&fpu->state);
		trace_x86_fpu_init_state(fpu);

		trace_x86_fpu_activate_state(fpu);
		/* Safe to do for stopped child tasks: */
		fpu->fpstate_active = 1;
	}
}
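
/*
 * Usage sketch (illustrative, write side; the mxcsr edit is a hypothetical
 * example of a modification): prepare the stopped child's fpstate, modify
 * it in memory, and rely on the invalidated lazy state to force a reload
 * when the child resumes:
 *
 *	fpu__activate_fpstate_write(child_fpu);
 *	child_fpu->state.fxsave.mxcsr = new_mxcsr;
 */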

/*
 * This function must be called before we write the current
 * task's fpstate.
 *
 * This call gets the current FPU register state and moves
 * it in to the 'fpstate'.  Preemption is disabled so that
 * no writes to the 'fpstate' can occur from context
 * switches.
 *
 * Must be followed by a fpu__current_fpstate_write_end().
 */
void fpu__current_fpstate_write_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	/*
	 * Ensure that the context-switching code does not write
	 * over the fpstate while we are doing our update.
	 */
	preempt_disable();

	/*
	 * Move the fpregs in to the fpu's 'fpstate'.
	 */
	fpu__activate_fpstate_read(fpu);

	/*
	 * The caller is about to write to 'fpu'.  Ensure that no
	 * CPU thinks that its fpregs match the fpstate.  This
	 * ensures we will not be lazy and skip a XRSTOR in the
	 * future.
	 */
	fpu->last_cpu = -1;
}

/*
 * This function must be paired with fpu__current_fpstate_write_begin()
 *
 * This will ensure that the modified fpstate gets placed back in
 * the fpregs if necessary.
 *
 * Note: This function may be called whether or not an _actual_
 * write to the fpstate occurred.
 */
void fpu__current_fpstate_write_end(void)
{
	struct fpu *fpu = &current->thread.fpu;

	/*
	 * 'fpu' now has an updated copy of the state, but the
	 * registers may still be out of date.  Update them with
	 * an XRSTOR if they are active.
	 */
	if (fpregs_active())
		copy_kernel_to_fpregs(&fpu->state);

	/*
	 * Our update is done and the fpregs/fpstate are in sync
	 * if necessary.  Context switches can happen again.
	 */
	preempt_enable();
}
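
/*
 * Usage sketch (illustrative; the xstate edit stands for whatever update
 * the caller needs to make to the current task's fpstate):
 *
 *	fpu__current_fpstate_write_begin();
 *	// ...modify current->thread.fpu.state in memory...
 *	fpu__current_fpstate_write_end();
 */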

/*
 * 'fpu__restore()' is called to copy FPU registers from
 * the FPU fpstate to the live hw registers and to activate
 * access to the hardware registers, so that FPU instructions
 * can be used afterwards.
 *
 * Must be called with kernel preemption disabled (for example
 * with local interrupts disabled, as it is in the case of
 * do_device_not_available()).
 */
void fpu__restore(struct fpu *fpu)
{
	fpu__activate_curr(fpu);

	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
	kernel_fpu_disable();
	trace_x86_fpu_before_restore(fpu);
	fpregs_activate(fpu);
	copy_kernel_to_fpregs(&fpu->state);
	fpu->counter++;
	trace_x86_fpu_after_restore(fpu);
	kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(fpu__restore);

/*
 * Drops current FPU state: deactivates the fpregs and
 * the fpstate. NOTE: it still leaves previous contents
 * in the fpregs in the eager-FPU case.
 *
 * This function can be used in cases where we know that
 * a state-restore is coming: either an explicit one,
 * or a reschedule.
 */
void fpu__drop(struct fpu *fpu)
{
	preempt_disable();
	fpu->counter = 0;

	if (fpu->fpregs_active) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		fpregs_deactivate(fpu);
	}

	fpu->fpstate_active = 0;

	trace_x86_fpu_dropped(fpu);

	preempt_enable();
}

/*
 * Clear FPU registers by setting them up from
 * the init fpstate:
 */
static inline void copy_init_fpstate_to_fpregs(void)
{
	if (use_xsave())
		copy_kernel_to_xregs(&init_fpstate.xsave, -1);
	else if (static_cpu_has(X86_FEATURE_FXSR))
		copy_kernel_to_fxregs(&init_fpstate.fxsave);
	else
		copy_kernel_to_fregs(&init_fpstate.fsave);
}

/*
 * Clear the FPU state back to init state.
 *
 * Called by sys_execve(), by the signal handler code and by various
 * error paths.
 */
void fpu__clear(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */

	if (!use_eager_fpu() || !static_cpu_has(X86_FEATURE_FPU)) {
		/* FPU state will be reallocated lazily at the first use. */
		fpu__drop(fpu);
	} else {
		if (!fpu->fpstate_active) {
			fpu__activate_curr(fpu);
			user_fpu_begin();
		}
		copy_init_fpstate_to_fpregs();
	}
}

/*
 * x87 math exception handling:
 */

int fpu__exception_code(struct fpu *fpu, int trap_nr)
{
	int err;

	if (trap_nr == X86_TRAP_MF) {
		unsigned short cwd, swd;
		/*
		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
		 * status.  0x3f is the exception bits in these regs, 0x200 is the
		 * C1 reg you need in case of a stack fault, 0x040 is the stack
		 * fault bit.  We should only be taking one exception at a time,
		 * so if this combination doesn't produce any single exception,
		 * then we have a bad program that isn't synchronizing its FPU usage
		 * and it will suffer the consequences since we won't be able to
		 * fully reproduce the context of the exception.
		 */
		if (boot_cpu_has(X86_FEATURE_FXSR)) {
			cwd = fpu->state.fxsave.cwd;
			swd = fpu->state.fxsave.swd;
		} else {
			cwd = (unsigned short)fpu->state.fsave.cwd;
			swd = (unsigned short)fpu->state.fsave.swd;
		}

		err = swd & ~cwd;
	} else {
		/*
		 * The SIMD FPU exceptions are handled a little differently, as there
		 * is only a single status/control register.  Thus, to determine which
		 * unmasked exception was caught we must mask the exception mask bits
		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
		 */
		unsigned short mxcsr = MXCSR_DEFAULT;

		if (boot_cpu_has(X86_FEATURE_XMM))
			mxcsr = fpu->state.fxsave.mxcsr;

		err = ~(mxcsr >> 7) & mxcsr;
	}

	if (err & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		return FPE_FLTINV;
	} else if (err & 0x004) { /* Divide by Zero */
		return FPE_FLTDIV;
	} else if (err & 0x008) { /* Overflow */
		return FPE_FLTOVF;
	} else if (err & 0x012) { /* Denormal, Underflow */
		return FPE_FLTUND;
	} else if (err & 0x020) { /* Precision */
		return FPE_FLTRES;
	}

	/*
	 * If we're using IRQ 13, or supposedly even some trap
	 * X86_TRAP_MF implementations, it's possible
	 * we get a spurious trap, which is not an error.
	 */
	return 0;
}
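
/*
 * Consumption sketch (illustrative; loosely modeled on the math_error()
 * path in traps.c, details assumed): a non-zero return value becomes the
 * si_code of the SIGFPE delivered to the faulting task, while a zero
 * return value means a spurious trap that is silently ignored:
 *
 *	info.si_signo = SIGFPE;
 *	info.si_code  = fpu__exception_code(fpu, trap_nr);
 *	if (info.si_code)
 *		force_sig_info(SIGFPE, &info, task);
 */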