diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 6193b7a9cf00bd68fba9527c00e991d9828fcc4e..da71d41227ff9a32c5ff46b125a76d699705a31b 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -141,7 +141,7 @@ static inline int copy_fxregs_to_user(struct fxregs_state __user *fx)
 	return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
 }
 
-static inline int copy_kernel_to_fxregs(struct fxregs_state *fx)
+static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
 {
 	int err;
 
@@ -157,8 +157,6 @@ static inline int copy_kernel_to_fxregs(struct fxregs_state *fx)
 	}
 	/* Copying from a kernel buffer to FPU registers should never fail: */
 	WARN_ON_FPU(err);
-
-	return err;
 }
 
 static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
@@ -173,13 +171,11 @@ static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
 		       "m" (*fx));
 }
 
-static inline int copy_kernel_to_fregs(struct fregs_state *fx)
+static inline void copy_kernel_to_fregs(struct fregs_state *fx)
 {
 	int err = check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
 
 	WARN_ON_FPU(err);
-
-	return err;
 }
 
 static inline int copy_user_to_fregs(struct fregs_state __user *fx)
@@ -450,20 +446,19 @@ static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
 	return 0;
 }
 
-static inline int __copy_kernel_to_fpregs(struct fpu *fpu)
+static inline void __copy_kernel_to_fpregs(struct fpu *fpu)
 {
 	if (use_xsave()) {
 		copy_kernel_to_xregs(&fpu->state.xsave, -1);
-		return 0;
 	} else {
 		if (use_fxsr())
-			return copy_kernel_to_fxregs(&fpu->state.fxsave);
+			copy_kernel_to_fxregs(&fpu->state.fxsave);
 		else
-			return copy_kernel_to_fregs(&fpu->state.fsave);
+			copy_kernel_to_fregs(&fpu->state.fsave);
 	}
 }
 
-static inline int copy_kernel_to_fpregs(struct fpu *fpu)
+static inline void copy_kernel_to_fpregs(struct fpu *fpu)
 {
 	/*
 	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
@@ -478,7 +473,7 @@ static inline int copy_kernel_to_fpregs(struct fpu *fpu)
 			: : [addr] "m" (fpu->fpregs_active));
 	}
 
-	return __copy_kernel_to_fpregs(fpu);
+	__copy_kernel_to_fpregs(fpu);
 }
 
 extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);
@@ -646,12 +641,8 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
  */
 static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
 {
-	if (fpu_switch.preload) {
-		if (unlikely(copy_kernel_to_fpregs(new_fpu))) {
-			WARN_ON_FPU(1);
-			fpu__clear(new_fpu);
-		}
-	}
+	if (fpu_switch.preload)
+		copy_kernel_to_fpregs(new_fpu);
 }
 
 /*
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index e0e0ee565dc30fb533102300f5c2a8d20204e959..8470df44c06d1b45b928ad9e77b01214b53bca4c 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -126,12 +126,10 @@ void __kernel_fpu_end(void)
 {
 	struct fpu *fpu = &current->thread.fpu;
 
-	if (fpu->fpregs_active) {
-		if (WARN_ON_FPU(copy_kernel_to_fpregs(fpu)))
-			fpu__clear(fpu);
-	} else {
+	if (fpu->fpregs_active)
+		copy_kernel_to_fpregs(fpu);
+	else
 		__fpregs_deactivate_hw();
-	}
 
 	kernel_fpu_enable();
 }
@@ -370,14 +368,8 @@ void fpu__restore(struct fpu *fpu)
 	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
 	kernel_fpu_disable();
 	fpregs_activate(fpu);
-	if (unlikely(copy_kernel_to_fpregs(fpu))) {
-		/* Copying the kernel state to FPU registers should never fail: */
-		WARN_ON_FPU(1);
-		fpu__clear(fpu);
-		force_sig_info(SIGSEGV, SEND_SIG_PRIV, current);
-	} else {
-		fpu->counter++;
-	}
+	copy_kernel_to_fpregs(fpu);
+	fpu->counter++;
 	kernel_fpu_enable();
 }
 EXPORT_SYMBOL_GPL(fpu__restore);