From eb6a3251bfe34f327570993e9a95dbf3a592b912 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Thu, 23 Apr 2015 17:08:41 +0200
Subject: [PATCH] x86/fpu: Remove task_disable_lazy_fpu_restore()

Replace task_disable_lazy_fpu_restore() with easier-to-read open-coded
uses: we already update the fpu->last_cpu field explicitly in other
cases.

(This also removes yet another task_struct-using FPU method.)

Better explain the fpu::last_cpu field in the structure definition.

Reviewed-by: Borislav Petkov
Cc: Andy Lutomirski
Cc: Dave Hansen
Cc: Fenghua Yu
Cc: H. Peter Anvin
Cc: Linus Torvalds
Cc: Oleg Nesterov
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/fpu-internal.h | 14 ++------------
 arch/x86/include/asm/fpu/types.h    | 11 +++++++++++
 arch/x86/kernel/fpu/core.c          |  5 ++---
 3 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index e8f7134f0ffb..76a1f3529881 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -74,16 +74,6 @@ static inline void __cpu_disable_lazy_restore(unsigned int cpu)
 	per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
 }
 
-/*
- * Used to indicate that the FPU state in memory is newer than the FPU
- * state in registers, and the FPU state should be reloaded next time the
- * task is run. Only safe on the current task, or non-running tasks.
- */
-static inline void task_disable_lazy_fpu_restore(struct task_struct *tsk)
-{
-	tsk->thread.fpu.last_cpu = ~0;
-}
-
 static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
 {
 	return &new->thread.fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) &&
@@ -430,7 +420,7 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
 
 	if (old_fpu->has_fpu) {
 		if (!fpu_save_init(&old->thread.fpu))
-			task_disable_lazy_fpu_restore(old);
+			old->thread.fpu.last_cpu = -1;
 		else
 			old->thread.fpu.last_cpu = cpu;
 
@@ -446,7 +436,7 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
 		stts();
 	} else {
 		old->thread.fpu.counter = 0;
-		task_disable_lazy_fpu_restore(old);
+		old->thread.fpu.last_cpu = -1;
 		if (fpu.preload) {
 			new->thread.fpu.counter++;
 			if (fpu_lazy_restore(new, cpu))
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
index f6317d9aa808..cad1c37d9ea2 100644
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -125,7 +125,18 @@ union thread_xstate {
 };
 
 struct fpu {
+	/*
+	 * Records the last CPU on which this context was loaded into
+	 * FPU registers. (In the lazy-switching case we might be
+	 * able to reuse FPU registers across multiple context switches
+	 * this way, if no intermediate task used the FPU.)
+	 *
+	 * A value of -1 is used to indicate that the FPU state in context
+	 * memory is newer than the FPU state in registers, and that the
+	 * FPU state should be reloaded next time the task is run.
+	 */
 	unsigned int last_cpu;
+
 	unsigned int has_fpu;
 	union thread_xstate *state;
 	/*
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index ba539fc018d7..230e93783c99 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -242,8 +242,7 @@ int fpu__copy(struct task_struct *dst, struct task_struct *src)
 	dst->thread.fpu.counter = 0;
 	dst->thread.fpu.has_fpu = 0;
 	dst->thread.fpu.state = NULL;
-
-	task_disable_lazy_fpu_restore(dst);
+	dst->thread.fpu.last_cpu = -1;
 
 	if (src_fpu->fpstate_active) {
 		int err = fpstate_alloc(dst_fpu);
@@ -319,7 +318,7 @@ static int fpu__unlazy_stopped(struct task_struct *child)
 		return -EINVAL;
 
 	if (child_fpu->fpstate_active) {
-		task_disable_lazy_fpu_restore(child);
+		child->thread.fpu.last_cpu = -1;
 		return 0;
 	}
 
-- 
GitLab
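
The open-coded replacement works because fpu::last_cpu is declared
'unsigned int': assigning -1 stores UINT_MAX, the same value the removed
helper wrote via '~0', and that value can never equal a real CPU number,
so the lazy-restore check always fails and forces a reload. Below is a
minimal user-space sketch of that invariant, not kernel code; the names
fpu_ctx, fpu_invalidate() and fpu_can_lazy_restore() are illustrative
stand-ins, and the real fpu_lazy_restore() additionally checks the
per-CPU fpu_fpregs_owner_ctx pointer.

#include <stdio.h>

struct fpu_ctx {
	unsigned int last_cpu;	/* CPU whose registers hold this context, or -1 */
};

/*
 * Open-coded equivalent of the removed task_disable_lazy_fpu_restore():
 * assigning -1 to the unsigned field stores UINT_MAX, the same bit
 * pattern the old helper wrote via '~0'.
 */
static void fpu_invalidate(struct fpu_ctx *fpu)
{
	fpu->last_cpu = -1;
}

/* Simplified last_cpu half of the kernel's fpu_lazy_restore() check. */
static int fpu_can_lazy_restore(const struct fpu_ctx *fpu, unsigned int cpu)
{
	return fpu->last_cpu == cpu;
}

int main(void)
{
	struct fpu_ctx fpu = { .last_cpu = 3 };

	printf("lazy restore on CPU 3? %d\n", fpu_can_lazy_restore(&fpu, 3)); /* 1 */
	fpu_invalidate(&fpu);
	printf("lazy restore on CPU 3? %d\n", fpu_can_lazy_restore(&fpu, 3)); /* 0 */
	printf("-1 stored as ~0?       %d\n", fpu.last_cpu == ~0u);           /* 1 */
	return 0;
}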