Commit 18461960 authored by Paul Mackerras, committed by Benjamin Herrenschmidt

powerpc: Provide for giveup_fpu/altivec to save state in alternate location

This provides a facility which is intended for use by KVM, where the
contents of the FP/VSX and VMX (Altivec) registers can be saved away
to somewhere other than the thread_struct when kernel code wants to
use floating point or VMX instructions.  This is done by providing a
pointer in the thread_struct to indicate where the state should be
saved to.  The giveup_fpu() and giveup_altivec() functions test these
pointers and save state to the indicated location if they are non-NULL.
Note that the MSR_FP/VEC bits in task->thread.regs->msr are still used
to indicate whether the CPU register state is live, even when an
alternate save location is being used.
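
As a concrete illustration of the pattern described above, a caller could look something like the sketch below. This is not code from this patch: the vcpu_fp_buf name is hypothetical, and enable_kernel_fp() is the existing powerpc helper that flushes the current FP owner via giveup_fpu().

/* Illustrative sketch only, not part of this patch. */
static struct thread_fp_state vcpu_fp_buf;	/* hypothetical caller-owned buffer */

static void use_fp_in_kernel(void)
{
	preempt_disable();
	/* Redirect the next FP state save away from thread.fp_state. */
	current->thread.fp_save_area = &vcpu_fp_buf;
	enable_kernel_fp();	/* giveup_fpu() now saves into vcpu_fp_buf */

	/* ... kernel code may use FP/VSX instructions here ... */

	current->thread.fp_save_area = NULL;
	preempt_enable();
}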

This also provides load_fp_state() and load_vr_state() functions, which
load up FP/VSX and VMX state from memory into the CPU registers, and
corresponding store_fp_state() and store_vr_state() functions, which
store FP/VSX and VMX state into memory from the CPU registers.
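
For example, a kernel-side round trip through memory might be bracketed as in the sketch below; the preemption and enable_kernel_fp() bracketing is an assumption about the calling context, since the helpers themselves require FP to already be enabled in the MSR.

/* Illustrative sketch: save and reload the FP/VSX register file. */
struct thread_fp_state buf;

preempt_disable();
enable_kernel_fp();		/* make FP usable in the kernel */
store_fp_state(&buf);		/* 32 FP/VSX registers, then FPSCR, into buf */
/* ... the FP registers may be clobbered here ... */
load_fp_state(&buf);		/* FPSCR first, then the 32 registers, from buf */
preempt_enable();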
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>

Parent: de79f7b9
arch/powerpc/include/asm/processor.h
@@ -211,6 +211,7 @@ struct thread_struct {
 #endif
 #endif
 	struct thread_fp_state	fp_state;
+	struct thread_fp_state	*fp_save_area;
 	int		fpexc_mode;	/* floating-point exception mode */
 	unsigned int	align_ctl;	/* alignment handling control */
 #ifdef CONFIG_PPC64
@@ -229,6 +230,7 @@ struct thread_struct {
 	unsigned long	trap_nr;	/* last trap # on this thread */
 #ifdef CONFIG_ALTIVEC
 	struct thread_vr_state vr_state;
+	struct thread_vr_state *vr_save_area;
 	unsigned long	vrsave;
 	int		used_vr;	/* set if process has used altivec */
 #endif /* CONFIG_ALTIVEC */
@@ -357,6 +359,11 @@ extern int set_endian(struct task_struct *tsk, unsigned int val);
 extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
 extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
 
+extern void load_fp_state(struct thread_fp_state *fp);
+extern void store_fp_state(struct thread_fp_state *fp);
+extern void load_vr_state(struct thread_vr_state *vr);
+extern void store_vr_state(struct thread_vr_state *vr);
+
 static inline unsigned int __unpack_fe01(unsigned long msr_bits)
 {
 	return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8);
...
arch/powerpc/kernel/asm-offsets.c
@@ -91,9 +91,11 @@ int main(void)
 #endif
 	DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode));
 	DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fp_state));
+	DEFINE(THREAD_FPSAVEAREA, offsetof(struct thread_struct, fp_save_area));
 	DEFINE(FPSTATE_FPSCR, offsetof(struct thread_fp_state, fpscr));
 #ifdef CONFIG_ALTIVEC
 	DEFINE(THREAD_VRSTATE, offsetof(struct thread_struct, vr_state));
+	DEFINE(THREAD_VRSAVEAREA, offsetof(struct thread_struct, vr_save_area));
 	DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave));
 	DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr));
 	DEFINE(VRSTATE_VSCR, offsetof(struct thread_vr_state, vscr));
...
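
For readers unfamiliar with asm-offsets.c: it is compiled only to generate integer constants for use in assembly. A simplified form of the DEFINE() machinery (the real one lives in include/linux/kbuild.h) is sketched below; treat it as an approximation, not this patch's code.

/* Simplified sketch of the kbuild DEFINE() mechanism.
 * The marker survives into the compiler's assembly output, where the build
 * scripts turn it into "#define THREAD_FPSAVEAREA <offset>" in the generated
 * asm-offsets.h, letting assembly such as fpu.S address the field directly:
 *	PPC_LL	r6,THREAD_FPSAVEAREA(r3)
 */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))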
arch/powerpc/kernel/fpu.S
@@ -80,6 +80,26 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 	blr
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 
+/*
+ * Load state from memory into FP registers including FPSCR.
+ * Assumes the caller has enabled FP in the MSR.
+ */
+_GLOBAL(load_fp_state)
+	lfd	fr0,FPSTATE_FPSCR(r3)
+	MTFSF_L(fr0)
+	REST_32FPVSRS(0, R4, R3)
+	blr
+
+/*
+ * Store FP state into memory, including FPSCR
+ * Assumes the caller has enabled FP in the MSR.
+ */
+_GLOBAL(store_fp_state)
+	SAVE_32FPVSRS(0, R4, R3)
+	mffs	fr0
+	stfd	fr0,FPSTATE_FPSCR(r3)
+	blr
+
 /*
  * This task wants to use the FPU now.
  * On UP, disable FP for the task which had the FPU previously,
@@ -172,9 +192,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 	PPC_LCMPI	0,r3,0
 	beqlr-				/* if no previous owner, done */
 	addi	r3,r3,THREAD		/* want THREAD of task */
+	PPC_LL	r6,THREAD_FPSAVEAREA(r3)
 	PPC_LL	r5,PT_REGS(r3)
-	PPC_LCMPI	0,r5,0
+	PPC_LCMPI	0,r6,0
+	bne	2f
 	addi	r6,r3,THREAD_FPSTATE
+2:	PPC_LCMPI	0,r5,0
 	SAVE_32FPVSRS(0, R4, R6)
 	mffs	fr0
 	stfd	fr0,FPSTATE_FPSCR(r6)
...
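
In C terms, the destination selection that the modified giveup_fpu now performs is roughly the following paraphrase of the assembly above (illustrative only, not code from the patch):

/* C paraphrase of the new giveup_fpu save path. */
struct thread_fp_state *dst = tsk->thread.fp_save_area;

if (!dst)				/* no alternate save area registered */
	dst = &tsk->thread.fp_state;	/* default: save into the thread_struct */

/* the 32 FP/VSX registers are stored to *dst, then FPSCR last;
 * MSR_FP in tsk->thread.regs->msr is still cleared afterwards, as before */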
arch/powerpc/kernel/ppc_ksyms.c
@@ -98,9 +98,13 @@ EXPORT_SYMBOL(start_thread);
 #ifdef CONFIG_PPC_FPU
 EXPORT_SYMBOL(giveup_fpu);
+EXPORT_SYMBOL(load_fp_state);
+EXPORT_SYMBOL(store_fp_state);
 #endif
 #ifdef CONFIG_ALTIVEC
 EXPORT_SYMBOL(giveup_altivec);
+EXPORT_SYMBOL(load_vr_state);
+EXPORT_SYMBOL(store_vr_state);
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_VSX
 EXPORT_SYMBOL(giveup_vsx);
...
arch/powerpc/kernel/process.c
@@ -1008,6 +1008,11 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	p->thread.ptrace_bps[0] = NULL;
 #endif
 
+	p->thread.fp_save_area = NULL;
+#ifdef CONFIG_ALTIVEC
+	p->thread.vr_save_area = NULL;
+#endif
+
 #ifdef CONFIG_PPC_STD_MMU_64
 	if (mmu_has_feature(MMU_FTR_SLB)) {
 		unsigned long sp_vsid;
@@ -1114,9 +1119,11 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 	current->thread.used_vsr = 0;
 #endif
 	memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
+	current->thread.fp_save_area = NULL;
 #ifdef CONFIG_ALTIVEC
 	memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
 	current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
+	current->thread.vr_save_area = NULL;
 	current->thread.vrsave = 0;
 	current->thread.used_vr = 0;
 #endif /* CONFIG_ALTIVEC */
...
arch/powerpc/kernel/vector.S
@@ -36,6 +36,28 @@ _GLOBAL(do_load_up_transact_altivec)
 	blr
 #endif
 
+/*
+ * Load state from memory into VMX registers including VSCR.
+ * Assumes the caller has enabled VMX in the MSR.
+ */
+_GLOBAL(load_vr_state)
+	li	r4,VRSTATE_VSCR
+	lvx	vr0,r4,r3
+	mtvscr	vr0
+	REST_32VRS(0,r4,r3)
+	blr
+
+/*
+ * Store VMX state into memory, including VSCR.
+ * Assumes the caller has enabled VMX in the MSR.
+ */
+_GLOBAL(store_vr_state)
+	SAVE_32VRS(0, r4, r3)
+	mfvscr	vr0
+	li	r4, VRSTATE_VSCR
+	stvx	vr0, r4, r3
+	blr
+
 /*
  * Disable VMX for the task which had it previously,
  * and save its vector registers in its thread_struct.
@@ -144,9 +166,12 @@ _GLOBAL(giveup_altivec)
 	PPC_LCMPI	0,r3,0
 	beqlr				/* if no previous owner, done */
 	addi	r3,r3,THREAD		/* want THREAD of task */
-	addi	r7,r3,THREAD_VRSTATE
+	PPC_LL	r7,THREAD_VRSAVEAREA(r3)
 	PPC_LL	r5,PT_REGS(r3)
-	PPC_LCMPI	0,r5,0
+	PPC_LCMPI	0,r7,0
+	bne	2f
+	addi	r7,r3,THREAD_VRSTATE
+2:	PPC_LCMPI	0,r5,0
 	SAVE_32VRS(0,r4,r7)
 	mfvscr	vr0
 	li	r4,VRSTATE_VSCR
...
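
Putting the pieces together, a hypothetical KVM-style caller of the VMX side might combine the save-area pointer with the new helpers as sketched below; the vcpu->arch.host_vr and vcpu->arch.guest_vr fields are assumptions for illustration, while enable_kernel_altivec() is the existing powerpc helper.

/* Illustrative sketch only; the vcpu fields are not from this patch. */
preempt_disable();
current->thread.vr_save_area = &vcpu->arch.host_vr;
enable_kernel_altivec();		/* giveup_altivec() saves into host_vr */
load_vr_state(&vcpu->arch.guest_vr);	/* bring guest VMX state into the CPU */

/* ... use VMX instructions on behalf of the guest ... */

store_vr_state(&vcpu->arch.guest_vr);	/* capture the guest state again */
current->thread.vr_save_area = NULL;
preempt_enable();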