diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index 04c2807aab668e43a2a88ddbd781239456a5579c..e5f8f8eaf2258b38fc6e04866fc8b2afa5a64f1d 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -439,7 +439,7 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
 
 	if (__thread_has_fpu(old)) {
 		if (!__save_init_fpu(old))
-			old->thread.fpu.last_cpu = ~0;
+			task_disable_lazy_fpu_restore(old);
 		else
 			old->thread.fpu.last_cpu = cpu;
 
@@ -455,7 +455,7 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
 			stts();
 	} else {
 		old->thread.fpu_counter = 0;
-		old->thread.fpu.last_cpu = ~0;
+		task_disable_lazy_fpu_restore(old);
 		if (fpu.preload) {
 			new->thread.fpu_counter++;
 			if (!use_eager_fpu() && fpu_lazy_restore(new, cpu))
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index f3ced6f4b2b6f8f7d5e67b53658b603f58e20c76..5722ab6c7c3620fc6bef1971e90493e75ca85989 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -236,7 +236,7 @@ int init_fpu(struct task_struct *tsk)
 	if (tsk_used_math(tsk)) {
 		if (cpu_has_fpu && tsk == current)
 			unlazy_fpu(tsk);
-		tsk->thread.fpu.last_cpu = ~0;
+		task_disable_lazy_fpu_restore(tsk);
 		return 0;
 	}
 
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index e127ddaa2d5ad3e139ff7a99fb5485b43fb19416..ce8b10351e28e836804df37ad409f9ad6f7d9190 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -68,8 +68,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 
 	dst->thread.fpu_counter = 0;
 	dst->thread.fpu.has_fpu = 0;
-	dst->thread.fpu.last_cpu = ~0;
 	dst->thread.fpu.state = NULL;
+	task_disable_lazy_fpu_restore(dst);
 	if (tsk_used_math(src)) {
 		int err = fpu_alloc(&dst->thread.fpu);
 		if (err)
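
The task_disable_lazy_fpu_restore() helper itself is not defined in the hunks shown above; it replaces the open-coded "fpu.last_cpu = ~0" assignments. A minimal sketch of what it presumably looks like is below (the exact body and its placement in fpu-internal.h are assumptions inferred from the code it replaces, not part of this diff):

/*
 * Sketch of the assumed helper: mark the task's lazily-saved FPU state
 * as stale by recording an impossible CPU number, so fpu_lazy_restore()
 * can never match last_cpu against a real CPU and the state must be
 * reloaded from memory on the next use.
 */
static inline void task_disable_lazy_fpu_restore(struct task_struct *tsk)
{
	tsk->thread.fpu.last_cpu = ~0;
}

Wrapping the assignment in a named helper keeps the three call sites identical and documents the intent (disable lazy restore) rather than the mechanism (~0 as an invalid CPU id).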