/*
 *  Copyright (C) 1994 Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <asm/fpu/internal.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/signal.h>
#include <asm/traps.h>

#include <linux/hardirq.h>

/*
 * Represents the initial FPU state. It's mostly (but not completely) zeroes,
 * depending on the FPU hardware format:
 */
union fpregs_state init_fpstate __read_mostly;

/*
 * Track whether the kernel is using the FPU state
 * currently.
 *
 * This flag is used:
 *
 *   - by IRQ context code to potentially use the FPU
 *     if it's unused.
 *
 *   - to debug kernel_fpu_begin()/end() correctness
 */
static DEFINE_PER_CPU(bool, in_kernel_fpu);

/*
 * Track which context is using the FPU on the CPU:
 */
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

static void kernel_fpu_disable(void)
{
	WARN_ON_FPU(this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, true);
}

static void kernel_fpu_enable(void)
{
	WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, false);
}

static bool kernel_fpu_disabled(void)
{
	return this_cpu_read(in_kernel_fpu);
}

/*
 * Were we in an interrupt that interrupted kernel mode?
 *
 * In the lazy-FPU case we can do a kernel_fpu_begin/end() pair *only*
 * if that pair does nothing at all: the thread must not have an active
 * FPU context (so that we don't try to save the FPU state), and TS must
 * be set (so that the clts/stts pair does nothing that is visible in
 * the interrupted kernel thread).
 *
 * The exception is the eagerfpu case, where we return true: in the
 * likely case the thread has FPU registers loaded, but we are not
 * going to set/clear TS anyway.
 */
static bool interrupted_kernel_fpu_idle(void)
{
	if (kernel_fpu_disabled())
		return false;

	if (use_eager_fpu())
		return true;

	return !current->thread.fpu.fpregs_active && (read_cr0() & X86_CR0_TS);
}

/*
 * Were we in user mode (or vm86 mode) when we were
 * interrupted?
 *
 * Doing kernel_fpu_begin/end() is ok if we are running
 * in an interrupt context from user mode - we'll just
 * save the FPU state as required.
 */
static bool interrupted_user_mode(void)
{
	struct pt_regs *regs = get_irq_regs();

	return regs && user_mode(regs);
}

/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 *
 * It's always ok in process context (ie "not interrupt")
 * but it is sometimes ok even from an irq.
 */
bool irq_fpu_usable(void)
{
	return !in_interrupt() ||
		interrupted_user_mode() ||
		interrupted_kernel_fpu_idle();
}
EXPORT_SYMBOL(irq_fpu_usable);

void __kernel_fpu_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	WARN_ON_FPU(!irq_fpu_usable());

	kernel_fpu_disable();

	if (fpu->fpregs_active) {
		/*
		 * Ignore return value -- we don't care if reg state
		 * is clobbered.
		 */
		copy_fpregs_to_fpstate(fpu);
	} else {
		this_cpu_write(fpu_fpregs_owner_ctx, NULL);
		__fpregs_activate_hw();
	}
}
EXPORT_SYMBOL(__kernel_fpu_begin);

void __kernel_fpu_end(void)
{
	struct fpu *fpu = &current->thread.fpu;

	if (fpu->fpregs_active)
		copy_kernel_to_fpregs(&fpu->state);
	else
		__fpregs_deactivate_hw();

	kernel_fpu_enable();
}
EXPORT_SYMBOL(__kernel_fpu_end);

void kernel_fpu_begin(void)
{
	preempt_disable();
	__kernel_fpu_begin();
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin);

void kernel_fpu_end(void)
{
	__kernel_fpu_end();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);
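
/*
 * Typical usage pattern (an illustrative sketch, not part of this file):
 * a driver that wants to run SIMD instructions, possibly from a context
 * that may be in_interrupt(), checks irq_fpu_usable() first and keeps a
 * scalar fallback:
 *
 *	if (irq_fpu_usable()) {
 *		kernel_fpu_begin();
 *		... SSE/AVX instructions; no sleeping, no faulting ...
 *		kernel_fpu_end();
 *	} else {
 *		... integer-only fallback ...
 *	}
 */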

/*
 * CR0::TS save/restore functions:
 */
int irq_ts_save(void)
{
	/*
	 * If in process context and not atomic, we can simply take a
	 * spurious DNA fault instead; avoiding it with clts() here would
	 * require disabling preemption or some heavy lifting like
	 * kernel_fpu_begin().
	 */
	if (!in_atomic())
		return 0;

	if (read_cr0() & X86_CR0_TS) {
		clts();
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(irq_ts_save);

void irq_ts_restore(int TS_state)
{
	if (TS_state)
		stts();
}
EXPORT_SYMBOL_GPL(irq_ts_restore);
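
/*
 * Usage sketch (hypothetical caller, in the style of the PadLock crypto
 * drivers): code that issues an FPU-touching instruction from atomic
 * context, without modifying user FPU state, brackets it with the TS
 * save/restore helpers:
 *
 *	int ts_state = irq_ts_save();
 *	... instruction that would otherwise trap on CR0::TS ...
 *	irq_ts_restore(ts_state);
 */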

/*
 * Save the FPU state (mark it for reload if necessary):
 *
 * This only ever gets called for the current task.
 */
void fpu__save(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu);

	preempt_disable();
	if (fpu->fpregs_active) {
		if (!copy_fpregs_to_fpstate(fpu)) {
			if (use_eager_fpu())
				copy_kernel_to_fpregs(&fpu->state);
			else
				fpregs_deactivate(fpu);
		}
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(fpu__save);

/*
 * Legacy x87 fpstate init, matching what FNINIT would set:
 */
static inline void fpstate_init_fstate(struct fregs_state *fp)
{
	fp->cwd = 0xffff037fu;	/* default control word: all exceptions masked */
	fp->swd = 0xffff0000u;	/* status word cleared */
	fp->twd = 0xffffffffu;	/* all FP registers tagged 'empty' */
	fp->fos = 0xffff0000u;
}

void fpstate_init(union fpregs_state *state)
{
	if (!cpu_has_fpu) {
		fpstate_init_soft(&state->soft);
		return;
	}

	memset(state, 0, xstate_size);

	if (cpu_has_fxsr)
		fpstate_init_fxstate(&state->fxsave);
	else
		fpstate_init_fstate(&state->fsave);
}
EXPORT_SYMBOL_GPL(fpstate_init);

/*
 * Copy the current task's FPU state to a new task's FPU context.
 *
 * In both the 'eager' and the 'lazy' case we save hardware registers
 * directly to the destination buffer.
 */
static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
	WARN_ON_FPU(src_fpu != &current->thread.fpu);

	/*
	 * Don't let 'init optimized' areas of the XSAVE area
	 * leak into the child task:
	 */
	if (use_eager_fpu())
		memset(&dst_fpu->state.xsave, 0, xstate_size);

	/*
	 * Save current FPU registers directly into the child
	 * FPU context, without any memory-to-memory copying.
	 *
	 * If the FPU context got destroyed in the process (FNSAVE
	 * done on old CPUs) then copy it back into the source
	 * context and mark the current task for lazy restore.
	 *
	 * We have to do all this with preemption disabled,
	 * mostly because of the FNSAVE case, because in that
	 * case we must not allow preemption in the window
	 * between the FNSAVE and us marking the context lazy.
	 *
	 * It shouldn't be an issue as even FNSAVE is plenty
	 * fast in terms of critical section length.
	 */
	preempt_disable();
	if (!copy_fpregs_to_fpstate(dst_fpu)) {
		memcpy(&src_fpu->state, &dst_fpu->state, xstate_size);

		if (use_eager_fpu())
			copy_kernel_to_fpregs(&src_fpu->state);
		else
			fpregs_deactivate(src_fpu);
	}
	preempt_enable();
}

int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
	dst_fpu->counter = 0;
	dst_fpu->fpregs_active = 0;
	dst_fpu->last_cpu = -1;

	if (src_fpu->fpstate_active && cpu_has_fpu)
		fpu_copy(dst_fpu, src_fpu);

	return 0;
}

/*
 * Activate the current task's in-memory FPU context,
 * if it has not been used before:
 */
void fpu__activate_curr(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu);

	if (!fpu->fpstate_active) {
		fpstate_init(&fpu->state);

		/* Safe to do for the current task: */
		fpu->fpstate_active = 1;
	}
}
EXPORT_SYMBOL_GPL(fpu__activate_curr);

/*
 * This function must be called before we read a task's fpstate.
 *
 * If the task has not used the FPU before then initialize its
 * fpstate.
 *
 * If the task has used the FPU before then save it.
 */
void fpu__activate_fpstate_read(struct fpu *fpu)
{
	/*
	 * If fpregs are active (in the current CPU), then
	 * copy them to the fpstate:
	 */
	if (fpu->fpregs_active) {
		fpu__save(fpu);
	} else {
		if (!fpu->fpstate_active) {
			fpstate_init(&fpu->state);

			/* Safe to do for current and for stopped child tasks: */
			fpu->fpstate_active = 1;
		}
	}
}

/*
 * This function must be called before we write a task's fpstate.
 *
 * If the task has used the FPU before then unlazy it.
 * If the task has not used the FPU before then initialize its fpstate.
 *
 * Once registers in the fpstate have been modified and the child task
 * has woken up, the child task will restore the modified FPU state
 * from the modified context. If we didn't clear its lazy status here
 * then the lazy in-registers state pending on its former CPU could be
 * restored, corrupting the modifications.
 */
void fpu__activate_fpstate_write(struct fpu *fpu)
{
	/*
	 * Only stopped child tasks can be used to modify the FPU
	 * state in the fpstate buffer:
	 */
	WARN_ON_FPU(fpu == &current->thread.fpu);

	if (fpu->fpstate_active) {
		/* Invalidate any lazy state: */
		fpu->last_cpu = -1;
	} else {
		fpstate_init(&fpu->state);

		/* Safe to do for stopped child tasks: */
		fpu->fpstate_active = 1;
	}
}

/*
 * 'fpu__restore()' is called to copy FPU registers from
 * the FPU fpstate to the live hw registers and to activate
 * access to the hardware registers, so that FPU instructions
 * can be used afterwards.
 *
 * Must be called with kernel preemption disabled (for example
 * with local interrupts disabled, as it is in the case of
 * do_device_not_available()).
 */
void fpu__restore(struct fpu *fpu)
{
	fpu__activate_curr(fpu);

	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
	kernel_fpu_disable();
	fpregs_activate(fpu);
	copy_kernel_to_fpregs(&fpu->state);
	fpu->counter++;
	kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(fpu__restore);
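
/*
 * Typical call site (a sketch, under the assumption that the #NM
 * 'device not available' trap handler looks roughly like it does in
 * this kernel generation): it runs with interrupts disabled and simply
 * restores the current task's FPU context:
 *
 *	dotraplinkage void
 *	do_device_not_available(struct pt_regs *regs, long error_code)
 *	{
 *		fpu__restore(&current->thread.fpu);
 *	}
 */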

/*
 * Drops current FPU state: deactivates the fpregs and
 * the fpstate. NOTE: it still leaves previous contents
 * in the fpregs in the eager-FPU case.
 *
 * This function can be used in cases where we know that
 * a state-restore is coming: either an explicit one,
 * or a reschedule.
 */
void fpu__drop(struct fpu *fpu)
{
	preempt_disable();
	fpu->counter = 0;

	if (fpu->fpregs_active) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		fpregs_deactivate(fpu);
	}

	fpu->fpstate_active = 0;

	preempt_enable();
}

/*
 * Clear FPU registers by setting them up from
 * the init fpstate:
 */
static inline void copy_init_fpstate_to_fpregs(void)
{
	if (use_xsave())
		copy_kernel_to_xregs(&init_fpstate.xsave, -1);
	else
		copy_kernel_to_fxregs(&init_fpstate.fxsave);
}

/*
 * Clear the FPU state back to init state.
 *
 * Called by sys_execve(), by the signal handler code and by various
 * error paths.
 */
void fpu__clear(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */

	if (!use_eager_fpu() || !static_cpu_has(X86_FEATURE_FPU)) {
		/* FPU state will be reallocated lazily at the first use. */
		fpu__drop(fpu);
	} else {
		if (!fpu->fpstate_active) {
			fpu__activate_curr(fpu);
			user_fpu_begin();
		}
		copy_init_fpstate_to_fpregs();
	}
}

/*
 * x87 math exception handling:
 */

static inline unsigned short get_fpu_cwd(struct fpu *fpu)
{
	if (cpu_has_fxsr) {
		return fpu->state.fxsave.cwd;
	} else {
		return (unsigned short)fpu->state.fsave.cwd;
	}
}

static inline unsigned short get_fpu_swd(struct fpu *fpu)
{
	if (cpu_has_fxsr) {
		return fpu->state.fxsave.swd;
	} else {
		return (unsigned short)fpu->state.fsave.swd;
	}
}

static inline unsigned short get_fpu_mxcsr(struct fpu *fpu)
{
	if (cpu_has_xmm) {
		return fpu->state.fxsave.mxcsr;
	} else {
		return MXCSR_DEFAULT;
	}
}

int fpu__exception_code(struct fpu *fpu, int trap_nr)
{
	int err;

	if (trap_nr == X86_TRAP_MF) {
		unsigned short cwd, swd;
		/*
		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
		 * status.  0x3f is the exception bits in these regs, 0x200 is the
		 * C1 reg you need in case of a stack fault, 0x040 is the stack
		 * fault bit.  We should only be taking one exception at a time,
		 * so if this combination doesn't produce any single exception,
		 * then we have a bad program that isn't synchronizing its FPU usage
		 * and it will suffer the consequences since we won't be able to
		 * fully reproduce the context of the exception.
		 */
		cwd = get_fpu_cwd(fpu);
		swd = get_fpu_swd(fpu);

		err = swd & ~cwd;
	} else {
		/*
		 * The SIMD FPU exceptions are handled a little differently, as there
		 * is only a single status/control register.  Thus, to determine which
		 * unmasked exception was caught we must mask the exception mask bits
		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
		 */
		unsigned short mxcsr = get_fpu_mxcsr(fpu);
		err = ~(mxcsr >> 7) & mxcsr;
	}

	if (err & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		return FPE_FLTINV;
	} else if (err & 0x004) { /* Divide by Zero */
		return FPE_FLTDIV;
	} else if (err & 0x008) { /* Overflow */
		return FPE_FLTOVF;
	} else if (err & 0x012) { /* Denormal, Underflow */
		return FPE_FLTUND;
	} else if (err & 0x020) { /* Precision */
		return FPE_FLTRES;
	}

	/*
	 * If we're using IRQ 13, or supposedly even some trap
	 * X86_TRAP_MF implementations, it's possible
	 * we get a spurious trap, which is not an error.
	 */
	return 0;
}
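
/*
 * Worked example (illustrative only): with divide-by-zero unmasked in
 * the x87 control word (cwd = 0x037b, i.e. the ZM bit 0x004 cleared),
 * executing 1.0/0.0 sets the ZE bit in the status word (swd & 0x004).
 * Then:
 *
 *	err = swd & ~cwd;	(the 0x004 bit survives the mask)
 *
 * and fpu__exception_code() returns FPE_FLTDIV. With the default
 * cwd = 0x037f, the same swd bit is masked off, err has no exception
 * bits set, and a spurious trap is reported as 0 (not an error).
 */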