Commit 3ef2932b authored by Paul Mundt

sh64: Fix up the build for the thread_xstate changes.

This updates the sh64 processor info with the sh32 changes in order to
tie in to the generic task_xstate management code.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Parent cb6d0446
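For context, the "generic task_xstate management code" the commit message refers to lives in arch/sh/kernel/process.c and is what the declarations moved into processor.h below (xstate_size, free_thread_xstate(), task_xstate_cachep) hook into. The following is a minimal sketch of that slab-backed allocation pattern; it is illustrative rather than a verbatim copy of the file, and arch_dup_task_struct() appears here only to show where the per-task copy of the state happens.

```c
/*
 * Sketch (not verbatim) of the shared task_xstate helpers in
 * arch/sh/kernel/process.c; symbol names follow the declarations
 * added to processor.h in this diff.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>

struct kmem_cache *task_xstate_cachep;	/* slab cache for union thread_xstate */
unsigned int xstate_size;		/* size of the per-task FPU/extended state */

/* Give a forked child its own copy of the parent's extended state. */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	*dst = *src;

	if (src->thread.xstate) {
		dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
						      GFP_KERNEL);
		if (!dst->thread.xstate)
			return -ENOMEM;
		memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
	}

	return 0;
}

/* Return a task's extended state to the cache (declared in processor.h). */
void free_thread_xstate(struct task_struct *tsk)
{
	if (tsk->thread.xstate) {
		kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
		tsk->thread.xstate = NULL;
	}
}
```

Keeping the FPU registers behind a thread.xstate pointer, rather than embedding union sh_fpu_union in thread_struct, is what drives the mechanical rename in the hunks below: every tsk->thread.fpu.hard access becomes tsk->thread.xstate->hardfpu.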
@@ -98,9 +98,15 @@ extern struct sh_cpuinfo cpu_data[];
 /* Forward decl */
 struct seq_operations;
+struct task_struct;
 
 extern struct pt_regs fake_swapper_regs;
 
+/* arch/sh/kernel/process.c */
+extern unsigned int xstate_size;
+extern void free_thread_xstate(struct task_struct *);
+extern struct kmem_cache *task_xstate_cachep;
+
 /* arch/sh/mm/init.c */
 extern unsigned int mem_init_done;
@@ -96,10 +96,6 @@ union thread_xstate {
 	struct sh_fpu_soft_struct softfpu;
 };
 
-extern unsigned int xstate_size;
-extern void free_thread_xstate(struct task_struct *);
-extern struct kmem_cache *task_xstate_cachep;
-
 struct thread_struct {
 	/* Saved registers when thread is descheduled */
 	unsigned long sp;
@@ -87,20 +87,21 @@ struct sh_fpu_hard_struct {
 	/* long status; * software status information */
 };
 
-#if 0
 /* Dummy fpu emulator */
 struct sh_fpu_soft_struct {
-	unsigned long long fp_regs[32];
+	unsigned long fp_regs[64];
 	unsigned int fpscr;
 	unsigned char lookahead;
 	unsigned long entry_pc;
 };
-#endif
 
-union sh_fpu_union {
-	struct sh_fpu_hard_struct hard;
-	/* 'hard' itself only produces 32 bit alignment, yet we need
-	   to access it using 64 bit load/store as well. */
+union thread_xstate {
+	struct sh_fpu_hard_struct hardfpu;
+	struct sh_fpu_soft_struct softfpu;
+
+	/*
+	 * The structure definitions only produce 32 bit alignment, yet we need
+	 * to access them using 64 bit load/store as well.
+	 */
 	unsigned long long alignment_dummy;
 };
@@ -122,7 +123,7 @@ struct thread_struct {
 	/* Hardware debugging registers may come here */
 
 	/* floating point info */
-	union sh_fpu_union fpu;
+	union thread_xstate *xstate;
 };
 
 #define INIT_MMAP \
@@ -137,7 +138,6 @@ struct thread_struct {
 	.trap_no	= 0,			\
 	.error_code	= 0,			\
 	.address	= 0,			\
-	.fpu		= { { { 0, } }, }	\
 }
 
 /*
@@ -27,8 +27,8 @@
 #define sNAN64		0xFFFFFFFFFFFFFFFFULL
 #define sNAN32		0xFFFFFFFFUL
 
-static union sh_fpu_union init_fpuregs = {
-	.hard = {
+static union thread_xstate init_fpuregs = {
+	.hardfpu = {
 		.fp_regs = { [0 ... 63] = sNAN32 },
 		.fpscr = FPSCR_INIT
 	}
@@ -72,7 +72,7 @@ void save_fpu(struct task_struct *tsk)
 		  "fgetscr fr63\n\t"
 		  "fst.s %0, (32*8), fr63\n\t"
 		  : /* no output */
-		  : "r" (&tsk->thread.fpu.hard)
+		  : "r" (&tsk->thread.xstate->hardfpu)
 		  : "memory");
 }
@@ -121,7 +121,7 @@ fpload(struct sh_fpu_hard_struct *fpregs)
 void fpinit(struct sh_fpu_hard_struct *fpregs)
 {
-	*fpregs = init_fpuregs.hard;
+	*fpregs = init_fpuregs.hardfpu;
 }
 
 asmlinkage void
@@ -157,10 +157,10 @@ do_fpu_state_restore(unsigned long ex, struct pt_regs *regs)
 	last_task_used_math = current;
 	if (used_math()) {
-		fpload(&current->thread.fpu.hard);
+		fpload(&current->thread.xstate->hardfpu);
 	} else {
 		/* First time FPU user. */
-		fpload(&init_fpuregs.hard);
+		fpload(&init_fpuregs.hardfpu);
 		set_used_math();
 	}
 	disable_fpu();
@@ -410,7 +410,7 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
 			regs->sr |= SR_FD;
 		}
 
-		memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
+		memcpy(fpu, &tsk->thread.xstate->hardfpu, sizeof(*fpu));
 	}
 
 	return fpvalid;
@@ -88,7 +88,7 @@ get_fpu_long(struct task_struct *task, unsigned long addr)
 		regs->sr |= SR_FD;
 	}
 
-	tmp = ((long *)&task->thread.fpu)[addr / sizeof(unsigned long)];
+	tmp = ((long *)task->thread.xstate)[addr / sizeof(unsigned long)];
 	return tmp;
 }
@@ -114,7 +114,7 @@ put_fpu_long(struct task_struct *task, unsigned long addr, unsigned long data)
 	regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;
 
 	if (!tsk_used_math(task)) {
-		fpinit(&task->thread.fpu.hard);
+		fpinit(&task->thread.xstate->hardfpu);
 		set_stopped_child_used_math(task);
 	} else if (last_task_used_math == task) {
 		enable_fpu();
@@ -124,7 +124,7 @@ put_fpu_long(struct task_struct *task, unsigned long addr, unsigned long data)
 		regs->sr |= SR_FD;
 	}
 
-	((long *)&task->thread.fpu)[addr / sizeof(unsigned long)] = data;
+	((long *)task->thread.xstate)[addr / sizeof(unsigned long)] = data;
 	return 0;
 }
@@ -222,7 +222,7 @@ int fpregs_get(struct task_struct *target,
 		return ret;
 
 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-				   &target->thread.fpu.hard, 0, -1);
+				   &target->thread.xstate->hardfpu, 0, -1);
 }
 
 static int fpregs_set(struct task_struct *target,
@@ -239,7 +239,7 @@ static int fpregs_set(struct task_struct *target,
 	set_stopped_child_used_math(target);
 
 	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-				  &target->thread.fpu.hard, 0, -1);
+				  &target->thread.xstate->hardfpu, 0, -1);
 }
 
 static int fpregs_active(struct task_struct *target,
@@ -295,7 +295,7 @@ restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
 		regs->sr |= SR_FD;
 	}
 
-	err |= __copy_from_user(&current->thread.fpu.hard, &sc->sc_fpregs[0],
+	err |= __copy_from_user(&current->thread.xstate->hardfpu, &sc->sc_fpregs[0],
 				(sizeof(long long) * 32) + (sizeof(int) * 1));
 
 	return err;
@@ -320,7 +320,7 @@ setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
 		regs->sr |= SR_FD;
 	}
 
-	err |= __copy_to_user(&sc->sc_fpregs[0], &current->thread.fpu.hard,
+	err |= __copy_to_user(&sc->sc_fpregs[0], &current->thread.xstate->hardfpu,
 			      (sizeof(long long) * 32) + (sizeof(int) * 1));
 	clear_used_math();
@@ -611,19 +611,19 @@ static int misaligned_fpu_load(struct pt_regs *regs,
 		switch (width_shift) {
 		case 2:
-			current->thread.fpu.hard.fp_regs[destreg] = buflo;
+			current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
 			break;
 		case 3:
 			if (do_paired_load) {
-				current->thread.fpu.hard.fp_regs[destreg] = buflo;
-				current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
+				current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
+				current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
 			} else {
 #if defined(CONFIG_CPU_LITTLE_ENDIAN)
-				current->thread.fpu.hard.fp_regs[destreg] = bufhi;
-				current->thread.fpu.hard.fp_regs[destreg+1] = buflo;
+				current->thread.xstate->hardfpu.fp_regs[destreg] = bufhi;
+				current->thread.xstate->hardfpu.fp_regs[destreg+1] = buflo;
 #else
-				current->thread.fpu.hard.fp_regs[destreg] = buflo;
-				current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
+				current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
+				current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
 #endif
 			}
 			break;
@@ -681,19 +681,19 @@ static int misaligned_fpu_store(struct pt_regs *regs,
 		switch (width_shift) {
 		case 2:
-			buflo = current->thread.fpu.hard.fp_regs[srcreg];
+			buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
 			break;
 		case 3:
 			if (do_paired_load) {
-				buflo = current->thread.fpu.hard.fp_regs[srcreg];
-				bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
+				buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
+				bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
 			} else {
 #if defined(CONFIG_CPU_LITTLE_ENDIAN)
-				bufhi = current->thread.fpu.hard.fp_regs[srcreg];
-				buflo = current->thread.fpu.hard.fp_regs[srcreg+1];
+				bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg];
+				buflo = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
 #else
-				buflo = current->thread.fpu.hard.fp_regs[srcreg];
-				bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
+				buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
+				bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
 #endif
 			}
 			break;