Commit 7ac59c62 authored by Paul Mackerras

ppc: Fix various compile errors resulting from ptrace.c merge

This introduces flush_{fp,altivec,spe}_to_thread and fixes a
branch-too-far error in linking.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Parent 3e63b9ec
@@ -633,7 +633,8 @@ sigreturn_exit:
 	rlwinm	r12,r1,0,0,18	/* current_thread_info() */
 	lwz	r9,TI_FLAGS(r12)
 	andi.	r0,r9,_TIF_SYSCALL_T_OR_A
-	bnel-	do_syscall_trace_leave
+	beq+	ret_from_except_full
+	bl	do_syscall_trace_leave
 	/* fall through */

 	.globl	ret_from_except_full
...
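The entry.S change is the branch-too-far fix named in the commit message: bnel- is a conditional branch-and-link, and PowerPC conditional branches encode only a 16-bit byte displacement (roughly ±32 KB), so the final link fails once do_syscall_trace_leave ends up out of range. An unconditional bl carries a 26-bit displacement (roughly ±32 MB), so inverting the test with beq+ and falling into a plain bl keeps the same behaviour while reaching the distant target.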
@@ -152,18 +152,66 @@ int check_stack(struct task_struct *tsk)
 }
 #endif /* defined(CHECK_STACK) */

-#ifdef CONFIG_ALTIVEC
-int
-dump_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
+/*
+ * Make sure the floating-point register state in the
+ * thread_struct is up to date for task tsk.
+ */
+void flush_fp_to_thread(struct task_struct *tsk)
 {
-	if (regs->msr & MSR_VEC)
-		giveup_altivec(current);
-	memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
+	if (tsk->thread.regs) {
+		/*
+		 * We need to disable preemption here because if we didn't,
+		 * another process could get scheduled after the regs->msr
+		 * test but before we have finished saving the FP registers
+		 * to the thread_struct.  That process could take over the
+		 * FPU, and then when we get scheduled again we would store
+		 * bogus values for the remaining FP registers.
+		 */
+		preempt_disable();
+		if (tsk->thread.regs->msr & MSR_FP) {
+#ifdef CONFIG_SMP
+			/*
+			 * This should only ever be called for current or
+			 * for a stopped child process.  Since we save away
+			 * the FP register state on context switch on SMP,
+			 * there is something wrong if a stopped child appears
+			 * to still have its FP state in the CPU registers.
+			 */
+			BUG_ON(tsk != current);
+#endif
+			giveup_fpu(current);
+		}
+		preempt_enable();
+	}
+}
+
+void enable_kernel_fp(void)
+{
+	WARN_ON(preemptible());
+
+#ifdef CONFIG_SMP
+	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
+		giveup_fpu(current);
+	else
+		giveup_fpu(NULL);	/* just enables FP for kernel */
+#else
+	giveup_fpu(last_task_used_math);
+#endif /* CONFIG_SMP */
+}
+EXPORT_SYMBOL(enable_kernel_fp);
+
+int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
+{
+	preempt_disable();
+	if (tsk->thread.regs && (tsk->thread.regs->msr & MSR_FP))
+		giveup_fpu(tsk);
+	preempt_enable();
+	memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
 	return 1;
 }

-void
-enable_kernel_altivec(void)
+#ifdef CONFIG_ALTIVEC
+void enable_kernel_altivec(void)
 {
 	WARN_ON(preemptible());

@@ -177,19 +225,35 @@ enable_kernel_altivec(void)
 #endif /* __SMP __ */
 }
 EXPORT_SYMBOL(enable_kernel_altivec);
-#endif /* CONFIG_ALTIVEC */

-#ifdef CONFIG_SPE
-int
-dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
+/*
+ * Make sure the VMX/Altivec register state in the
+ * thread_struct is up to date for task tsk.
+ */
+void flush_altivec_to_thread(struct task_struct *tsk)
 {
-	if (regs->msr & MSR_SPE)
-		giveup_spe(current);
-	/* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
-	memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
+	if (tsk->thread.regs) {
+		preempt_disable();
+		if (tsk->thread.regs->msr & MSR_VEC) {
+#ifdef CONFIG_SMP
+			BUG_ON(tsk != current);
+#endif
+			giveup_altivec(current);
+		}
+		preempt_enable();
+	}
+}
+
+int dump_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
+{
+	if (regs->msr & MSR_VEC)
+		giveup_altivec(current);
+	memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
 	return 1;
 }
+#endif /* CONFIG_ALTIVEC */

+#ifdef CONFIG_SPE
 void
 enable_kernel_spe(void)
 {
@@ -205,34 +269,30 @@ enable_kernel_spe(void)
 #endif /* __SMP __ */
 }
 EXPORT_SYMBOL(enable_kernel_spe);
-#endif /* CONFIG_SPE */

-void
-enable_kernel_fp(void)
+void flush_spe_to_thread(struct task_struct *tsk)
 {
-	WARN_ON(preemptible());
-
+	if (tsk->thread.regs) {
+		preempt_disable();
+		if (tsk->thread.regs->msr & MSR_SPE) {
 #ifdef CONFIG_SMP
-	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
-		giveup_fpu(current);
-	else
-		giveup_fpu(NULL);	/* just enables FP for kernel */
-#else
-	giveup_fpu(last_task_used_math);
-#endif /* CONFIG_SMP */
+			BUG_ON(tsk != current);
+#endif
+			giveup_spe(current);
+		}
+		preempt_enable();
+	}
 }
-EXPORT_SYMBOL(enable_kernel_fp);

-int
-dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
+int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
 {
-	preempt_disable();
-	if (tsk->thread.regs && (tsk->thread.regs->msr & MSR_FP))
-		giveup_fpu(tsk);
-	preempt_enable();
-	memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
+	if (regs->msr & MSR_SPE)
+		giveup_spe(current);
+	/* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
+	memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
 	return 1;
 }
+#endif /* CONFIG_SPE */

 struct task_struct *__switch_to(struct task_struct *prev,
 	struct task_struct *new)
...
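All three new flush_*_to_thread() helpers share one pattern: with preemption disabled, test the saved MSR bit and, if the state is still live in the CPU, force it back into the thread_struct via the matching giveup_*() routine. As a minimal sketch of how a consumer such as the merged ptrace code might then read a stopped child's FP state (the function name and buffer layout below are illustrative assumptions, not code from this commit):

	/* Hypothetical caller, not part of this commit: snapshot a stopped
	 * child's FP registers.  Assumes the arch/ppc thread_struct of this
	 * era, where saved FP state lives in tsk->thread.fpr[32] (doubles). */
	static int read_child_fpregs(struct task_struct *child, double *buf)
	{
		flush_fp_to_thread(child);	/* make thread.fpr[] current */
		memcpy(buf, &child->thread.fpr[0], 32 * sizeof(double));
		return 0;
	}

The preempt_disable() window inside the helper is what makes the subsequent copy safe: nothing can take over the FPU between the MSR_FP test and giveup_fpu() writing the registers out.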
@@ -74,6 +74,7 @@ extern void read_rtc_time(void);
 extern void pmac_find_display(void);
 extern void giveup_fpu(struct task_struct *);
 extern void enable_kernel_fp(void);
+extern void flush_fp_to_thread(struct task_struct *);
 extern void enable_kernel_altivec(void);
 extern void giveup_altivec(struct task_struct *);
 extern void load_up_altivec(struct task_struct *);
@@ -83,6 +84,23 @@ extern void load_up_spe(struct task_struct *);
 extern int fix_alignment(struct pt_regs *);
 extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
 extern void cvt_df(double *from, float *to, unsigned long *fpscr);
+#ifdef CONFIG_ALTIVEC
+extern void flush_altivec_to_thread(struct task_struct *);
+#else
+static inline void flush_altivec_to_thread(struct task_struct *t)
+{
+}
+#endif
+
+#ifdef CONFIG_SPE
+extern void flush_spe_to_thread(struct task_struct *);
+#else
+static inline void flush_spe_to_thread(struct task_struct *t)
+{
+}
+#endif
+
 extern int call_rtas(const char *, int, int, unsigned long *, ...);
 extern void cacheable_memzero(void *p, unsigned int nb);
 extern void *cacheable_memcpy(void *, const void *, unsigned int);
...
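The header changes mirror that split: flush_fp_to_thread() is always declared, while flush_altivec_to_thread() and flush_spe_to_thread() collapse to empty static inline stubs when CONFIG_ALTIVEC or CONFIG_SPE is disabled. Callers such as the merged ptrace code can therefore invoke them unconditionally, without per-call-site #ifdefs, which is the class of compile error the commit title refers to.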