diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index e8f7134f0ffbd4adb73d98f337bd68e48308572a..76a1f35298813ffee8271866e8ff54a3024450ee 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -74,16 +74,6 @@ static inline void __cpu_disable_lazy_restore(unsigned int cpu)
 	per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
 }
 
-/*
- * Used to indicate that the FPU state in memory is newer than the FPU
- * state in registers, and the FPU state should be reloaded next time the
- * task is run. Only safe on the current task, or non-running tasks.
- */
-static inline void task_disable_lazy_fpu_restore(struct task_struct *tsk)
-{
-	tsk->thread.fpu.last_cpu = ~0;
-}
-
 static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
 {
 	return &new->thread.fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) &&
@@ -430,7 +420,7 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
 
 	if (old_fpu->has_fpu) {
 		if (!fpu_save_init(&old->thread.fpu))
-			task_disable_lazy_fpu_restore(old);
+			old->thread.fpu.last_cpu = -1;
 		else
 			old->thread.fpu.last_cpu = cpu;
 
@@ -446,7 +436,7 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
 			stts();
 	} else {
 		old->thread.fpu.counter = 0;
-		task_disable_lazy_fpu_restore(old);
+		old->thread.fpu.last_cpu = -1;
 		if (fpu.preload) {
 			new->thread.fpu.counter++;
 			if (fpu_lazy_restore(new, cpu))
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
index f6317d9aa808016bb9ef420630247cad6f676300..cad1c37d9ea2b006b7a2a642d4c62129bbe3b6e4 100644
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -125,7 +125,18 @@ union thread_xstate {
 };
 
 struct fpu {
+	/*
+	 * Records the last CPU on which this context was loaded into
+	 * FPU registers. (In the lazy-switching case we might be
+	 * able to reuse FPU registers across multiple context switches
+	 * this way, if no intermediate task used the FPU.)
+	 *
+	 * A value of -1 is used to indicate that the FPU state in context
+	 * memory is newer than the FPU state in registers, and that the
+	 * FPU state should be reloaded next time the task is run.
+	 */
 	unsigned int last_cpu;
+
 	unsigned int has_fpu;
 	union thread_xstate *state;
 	/*
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index ba539fc018d749f3f9127ca911461643e8a182ce..230e93783c9997dad7e0e3713ba3288aa9ebf5a4 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -242,8 +242,7 @@ int fpu__copy(struct task_struct *dst, struct task_struct *src)
 	dst->thread.fpu.counter = 0;
 	dst->thread.fpu.has_fpu = 0;
 	dst->thread.fpu.state = NULL;
-
-	task_disable_lazy_fpu_restore(dst);
+	dst->thread.fpu.last_cpu = -1;
 
 	if (src_fpu->fpstate_active) {
 		int err = fpstate_alloc(dst_fpu);
@@ -319,7 +318,7 @@ static int fpu__unlazy_stopped(struct task_struct *child)
 		return -EINVAL;
 
 	if (child_fpu->fpstate_active) {
-		task_disable_lazy_fpu_restore(child);
+		child->thread.fpu.last_cpu = -1;
 		return 0;
 	}
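
Note on the -1 sentinel: the removed helper wrote ~0 into ->last_cpu, while the open-coded
replacement writes -1; since last_cpu is an unsigned int, both store UINT_MAX, which can
never equal a real CPU number. Below is a minimal userspace sketch (not kernel code) of
how that sentinel defeats the lazy-restore check. The struct and fpu_lazy_restore() here
are simplified stand-ins: the real fpu_lazy_restore() also compares against the per-CPU
fpu_fpregs_owner_ctx pointer, which this sketch omits.

	#include <stdio.h>

	/* Simplified stand-in for the kernel's struct fpu; only the field
	 * driving the lazy-restore decision is kept. */
	struct fpu {
		unsigned int last_cpu;	/* CPU whose registers hold this state, or -1 */
	};

	/* Registers may be reused only if this context was the last one
	 * loaded on the current CPU. (Sketch: the kernel version also
	 * checks the per-CPU owner-context pointer.) */
	static int fpu_lazy_restore(const struct fpu *fpu, unsigned int cpu)
	{
		return fpu->last_cpu == cpu;
	}

	int main(void)
	{
		struct fpu fpu = { .last_cpu = 2 };

		/* State still live in CPU 2's registers: reuse, no reload. */
		printf("reuse on cpu 2: %d\n", fpu_lazy_restore(&fpu, 2));

		/*
		 * -1 stored into an unsigned int wraps to UINT_MAX (the same
		 * value the removed helper wrote as ~0), which matches no
		 * real CPU, so the check fails and the state is reloaded
		 * from memory next time the task runs.
		 */
		fpu.last_cpu = -1;
		printf("reuse on cpu 2: %d\n", fpu_lazy_restore(&fpu, 2));

		return 0;
	}

Open-coding the assignment in three call sites trades the helper's self-documenting name
for a comment at the field definition, which keeps the invalidation rule next to the
field it protects.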