提交 58a992b9 编写于 作者: Brian Gerst 提交者: H. Peter Anvin

x86-32, fpu: Rewrite fpu_save_init()

Rewrite fpu_save_init() to prepare for merging with 64-bit.
Signed-off-by: Brian Gerst <brgerst@gmail.com>
Acked-by: Pekka Enberg <penberg@kernel.org>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
LKML-Reference: <1283563039-3466-12-git-send-email-brgerst@gmail.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
上级 eec73f81
...@@ -73,6 +73,11 @@ static __always_inline __pure bool use_xsave(void) ...@@ -73,6 +73,11 @@ static __always_inline __pure bool use_xsave(void)
return static_cpu_has(X86_FEATURE_XSAVE); return static_cpu_has(X86_FEATURE_XSAVE);
} }
/* True iff this CPU supports FXSR (the fxsave/fxrstor instructions). */
static __always_inline __pure bool use_fxsr(void)
{
return static_cpu_has(X86_FEATURE_FXSR);
}
extern void __sanitize_i387_state(struct task_struct *); extern void __sanitize_i387_state(struct task_struct *);
static inline void sanitize_i387_state(struct task_struct *tsk) static inline void sanitize_i387_state(struct task_struct *tsk)
...@@ -211,6 +216,12 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx) ...@@ -211,6 +216,12 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
return 0; return 0;
} }
/*
 * Save the FPU/SSE register state into fpu->state->fxsave with FXSAVE.
 * The "=m" output constraint informs the compiler that the fxsave area
 * is written by the asm, so no separate "memory" clobber is needed.
 * NOTE(review): this appears to be the 32-bit variant (no REX-width
 * handling visible here) — confirm against the 64-bit counterpart.
 */
static inline void fpu_fxsave(struct fpu *fpu)
{
asm volatile("fxsave %[fx]"
: [fx] "=m" (fpu->state->fxsave));
}
/* We need a safe address that is cheap to find and that is already /* We need a safe address that is cheap to find and that is already
in L1 during context switch. The best choices are unfortunately in L1 during context switch. The best choices are unfortunately
different for UP and SMP */ different for UP and SMP */
...@@ -226,36 +237,24 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx) ...@@ -226,36 +237,24 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
static inline void fpu_save_init(struct fpu *fpu) static inline void fpu_save_init(struct fpu *fpu)
{ {
if (use_xsave()) { if (use_xsave()) {
struct xsave_struct *xstate = &fpu->state->xsave;
struct i387_fxsave_struct *fx = &fpu->state->fxsave;
fpu_xsave(fpu); fpu_xsave(fpu);
/* /*
* xsave header may indicate the init state of the FP. * xsave header may indicate the init state of the FP.
*/ */
if (!(xstate->xsave_hdr.xstate_bv & XSTATE_FP)) if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
goto end; return;
} else if (use_fxsr()) {
if (unlikely(fx->swd & X87_FSW_ES)) fpu_fxsave(fpu);
asm volatile("fnclex"); } else {
asm volatile("fsave %[fx]; fwait"
/* : [fx] "=m" (fpu->state->fsave));
* we can do a simple return here or be paranoid :) return;
*/
goto clear_state;
} }
/* Use more nops than strictly needed in case the compiler if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES))
varies code */ asm volatile("fnclex");
alternative_input(
"fnsave %[fx] ;fwait;" GENERIC_NOP8 GENERIC_NOP4,
"fxsave %[fx]\n"
"bt $7,%[fsw] ; jnc 1f ; fnclex\n1:",
X86_FEATURE_FXSR,
[fx] "m" (fpu->state->fxsave),
[fsw] "m" (fpu->state->fxsave.swd) : "memory");
clear_state:
/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
is pending. Clear the x87 state here by setting it to fixed is pending. Clear the x87 state here by setting it to fixed
values. safe_address is a random variable that should be in L1 */ values. safe_address is a random variable that should be in L1 */
...@@ -265,8 +264,6 @@ static inline void fpu_save_init(struct fpu *fpu) ...@@ -265,8 +264,6 @@ static inline void fpu_save_init(struct fpu *fpu)
"fildl %[addr]", /* set F?P to defined value */ "fildl %[addr]", /* set F?P to defined value */
X86_FEATURE_FXSAVE_LEAK, X86_FEATURE_FXSAVE_LEAK,
[addr] "m" (safe_address)); [addr] "m" (safe_address));
end:
;
} }
#endif /* CONFIG_X86_64 */ #endif /* CONFIG_X86_64 */
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册