Commit 1193f408, authored by Thomas Gleixner, committed by Borislav Petkov

x86/fpu/signal: Change return type of __fpu_restore_sig() to boolean

Now that fpu__restore_sig() returns a boolean get rid of the individual
error codes in __fpu_restore_sig() as well.
Suggested-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/20210908132525.966197097@linutronix.de
Parent f3305be5
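
The pattern applied throughout the diff below is a plain int-to-bool conversion of the return convention: instead of propagating distinct negative error codes (-EFAULT, -EINVAL) that callers must compare against zero, the helper reports bare success or failure and the caller tests the result directly. The following standalone sketch illustrates that convention outside the kernel; the names copy_frame(), restore_old() and restore_new() are hypothetical and exist only for this example, they are not part of the patch.

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/*
 * Hypothetical stand-in for a copy_from_user()-style helper: returns
 * the number of bytes that could NOT be copied, i.e. 0 on success.
 */
static size_t copy_frame(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);		/* always succeeds in this sketch */
	return 0;
}

/* Old convention: 0 on success, a negative error code on failure. */
static int restore_old(void *dst, const void *src, size_t len)
{
	if (copy_frame(dst, src, len))
		return -1;		/* stand-in for -EFAULT */
	return 0;
}

/* New convention: true on success, false on failure. */
static bool restore_new(void *dst, const void *src, size_t len)
{
	return !copy_frame(dst, src, len);
}

int main(void)
{
	char dst[16], src[16] = "frame";

	/* Old convention: the caller derives success by inverting the error code. */
	bool success_old = !restore_old(dst, src, sizeof(dst));

	/* New convention: the return value already is the success flag. */
	bool success_new = restore_new(dst, src, sizeof(dst));

	return (success_old && success_new) ? 0 : 1;
}

The same simplification shows up in the last hunk of the diff, where fpu__restore_sig() drops the negation in front of the __fpu_restore_sig() call.
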
@@ -309,8 +309,8 @@ static int restore_fpregs_from_user(void __user *buf, u64 xrestore,
 	return 0;
 }
 
-static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
-			     bool ia32_fxstate)
+static bool __fpu_restore_sig(void __user *buf, void __user *buf_fx,
+			      bool ia32_fxstate)
 {
 	int state_size = fpu_kernel_xstate_size;
 	struct task_struct *tsk = current;
@@ -318,14 +318,14 @@ static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
 	struct user_i387_ia32_struct env;
 	u64 user_xfeatures = 0;
 	bool fx_only = false;
-	int ret;
+	bool success;
 
 	if (use_xsave()) {
 		struct _fpx_sw_bytes fx_sw_user;
 
-		ret = check_xstate_in_sigframe(buf_fx, &fx_sw_user);
-		if (unlikely(ret))
-			return ret;
+		if (check_xstate_in_sigframe(buf_fx, &fx_sw_user))
+			return false;
 
 		fx_only = !fx_sw_user.magic1;
 		state_size = fx_sw_user.xstate_size;
@@ -341,8 +341,8 @@ static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
 		 * faults. If it does, fall back to the slow path below, going
 		 * through the kernel buffer with the enabled pagefault handler.
 		 */
-		return restore_fpregs_from_user(buf_fx, user_xfeatures, fx_only,
-						state_size);
+		return !restore_fpregs_from_user(buf_fx, user_xfeatures, fx_only,
+						 state_size);
 	}
 
 	/*
@@ -350,9 +350,8 @@ static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
 	 * to be ignored for histerical raisins. The legacy state is folded
 	 * in once the larger state has been copied.
 	 */
-	ret = __copy_from_user(&env, buf, sizeof(env));
-	if (ret)
-		return ret;
+	if (__copy_from_user(&env, buf, sizeof(env)))
+		return false;
 
 	/*
 	 * By setting TIF_NEED_FPU_LOAD it is ensured that our xstate is
@@ -379,17 +378,16 @@ static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
 	fpregs_unlock();
 
 	if (use_xsave() && !fx_only) {
-		ret = copy_sigframe_from_user_to_xstate(&fpu->state.xsave, buf_fx);
-		if (ret)
-			return ret;
+		if (copy_sigframe_from_user_to_xstate(&fpu->state.xsave, buf_fx))
+			return false;
 	} else {
 		if (__copy_from_user(&fpu->state.fxsave, buf_fx,
 				     sizeof(fpu->state.fxsave)))
-			return -EFAULT;
+			return false;
 
 		/* Reject invalid MXCSR values. */
 		if (fpu->state.fxsave.mxcsr & ~mxcsr_feature_mask)
-			return -EINVAL;
+			return false;
 
 		/* Enforce XFEATURE_MASK_FPSSE when XSAVE is enabled */
 		if (use_xsave())
@@ -413,17 +411,18 @@ static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
 		u64 mask = user_xfeatures | xfeatures_mask_supervisor();
 
 		fpu->state.xsave.header.xfeatures &= mask;
-		ret = os_xrstor_safe(&fpu->state.xsave, xfeatures_mask_all) ? -EINVAL : 0;
+		success = !os_xrstor_safe(&fpu->state.xsave, xfeatures_mask_all);
 	} else {
-		ret = fxrstor_safe(&fpu->state.fxsave);
+		success = !fxrstor_safe(&fpu->state.fxsave);
 	}
 
-	if (likely(!ret))
+	if (likely(success))
 		fpregs_mark_activate();
+
 	fpregs_unlock();
-	return ret;
+	return success;
 }
 
 static inline int xstate_sigframe_size(void)
 {
 	return use_xsave() ? fpu_user_xstate_size + FP_XSTATE_MAGIC2_SIZE :
@@ -467,7 +466,7 @@ bool fpu__restore_sig(void __user *buf, int ia32_frame)
 					      sizeof(struct user_i387_ia32_struct),
 					      NULL, buf);
 	} else {
-		success = !__fpu_restore_sig(buf, buf_fx, ia32_fxstate);
+		success = __fpu_restore_sig(buf, buf_fx, ia32_fxstate);
 	}
 
 out: