Commit e1884d69 authored by Ingo Molnar

x86/fpu: Pass 'struct fpu' to fpu__restore()

This cleans up the call sites and the function a bit,
and also makes it more symmetric with the other high
level FPU state handling functions.

It's still only valid for the current task, as we copy
to the FPU registers of the current CPU.

No change in functionality.

Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent 32231879
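For illustration only, not part of the patch: a minimal sketch of the caller-visible change, assuming the fpu__*() prototypes live in asm/fpu/internal.h as in this series. The old API implicitly acted on current; the new API takes the 'struct fpu' explicitly, typically &current->thread.fpu, and remains valid only for the current task.

/* Hypothetical call site, for illustration only. */
#include <linux/sched.h>
#include <asm/fpu/internal.h>	/* assumed location of the fpu__*() prototypes */

static void example_restore_current_fpu(void)
{
	/* Old API: fpu__restore();  (implicitly used current's FPU state) */
	/* New API: the FPU context is passed explicitly:                  */
	fpu__restore(&current->thread.fpu);
}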
@@ -50,7 +50,7 @@ extern int fpu__exception_code(struct fpu *fpu, int trap_nr);
 extern void fpu__activate_curr(struct fpu *fpu);
 extern void fpu__activate_stopped(struct fpu *fpu);
 extern void fpu__save(struct fpu *fpu);
-extern void fpu__restore(void);
+extern void fpu__restore(struct fpu *fpu);
 extern int fpu__restore_sig(void __user *buf, int ia32_frame);
 extern void fpu__drop(struct fpu *fpu);
 extern int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu);
...
@@ -343,11 +343,8 @@ void fpu__activate_stopped(struct fpu *child_fpu)
  * with local interrupts disabled, as it is in the case of
  * do_device_not_available()).
  */
-void fpu__restore(void)
+void fpu__restore(struct fpu *fpu)
 {
-	struct task_struct *tsk = current;
-	struct fpu *fpu = &tsk->thread.fpu;
-
 	fpu__activate_curr(fpu);
 
 	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
@@ -355,9 +352,9 @@ void fpu__restore(void)
 	fpregs_activate(fpu);
 	if (unlikely(copy_fpstate_to_fpregs(fpu))) {
 		fpu__clear(fpu);
-		force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
+		force_sig_info(SIGSEGV, SEND_SIG_PRIV, current);
 	} else {
-		tsk->thread.fpu.counter++;
+		fpu->counter++;
 	}
 	kernel_fpu_enable();
 }
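For readability, here is roughly how fpu__restore() reads once both hunks above are applied; the single context line elided between the two hunks is assumed to be the kernel_fpu_disable() call implied by the comment and by the matching kernel_fpu_enable() at the end.

void fpu__restore(struct fpu *fpu)
{
	fpu__activate_curr(fpu);

	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
	kernel_fpu_disable();		/* assumed: not shown in the hunks above */
	fpregs_activate(fpu);
	if (unlikely(copy_fpstate_to_fpregs(fpu))) {
		fpu__clear(fpu);
		force_sig_info(SIGSEGV, SEND_SIG_PRIV, current);
	} else {
		fpu->counter++;
	}
	kernel_fpu_enable();
}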
...
@@ -319,7 +319,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
 		fpu->fpstate_active = 1;
 		if (use_eager_fpu()) {
 			preempt_disable();
-			fpu__restore();
+			fpu__restore(fpu);
 			preempt_enable();
 		}
...
@@ -803,7 +803,7 @@ do_device_not_available(struct pt_regs *regs, long error_code)
 		return;
 	}
 #endif
-	fpu__restore(); /* interrupts still off */
+	fpu__restore(&current->thread.fpu); /* interrupts still off */
 #ifdef CONFIG_X86_32
 	conditional_sti(regs);
 #endif
...
@@ -302,7 +302,7 @@ void lguest_arch_run_guest(struct lg_cpu *cpu)
 	 * before this.
 	 */
 	else if (cpu->regs->trapnum == 7 && !fpregs_active())
-		fpu__restore();
+		fpu__restore(&current->thread.fpu);
 }
 
 /*H:130
...