Commit 47455911 authored by Linus Torvalds

Merge branch 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus

* 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus:
  [MIPS] Kconfig: Move misplaced NR_CPUS default from SMTC to VSMP.
  [MIPS] Lockdep: Fix recursion bug.
  [MIPS] RTLX: Handle copy_*_user return values.
  [MIPS] RTLX: Protect rtlx_{read,write} with mutex.
  [MIPS] RTLX: Harden against compiler reordering and optimization.
  [MIPS] RTLX: Don't use volatile; it's fragile.
  [MIPS] Lasat: Downgrade 64-bit kernel from experimental to broken.
  [MIPS] Compat: Fix build if CONFIG_SYSVIPC is disabled.
  [CHAR] lcd: Fix two warnings.
  [MIPS] FPU ownership management & preemption fixes
  [MIPS] Check FCSR for pending interrupts, alternative version
  [MIPS] IP27, IP35: Fix warnings.
@@ -250,7 +250,7 @@ config LASAT
 	select R5000_CPU_SCACHE
 	select SYS_HAS_CPU_R5000
 	select SYS_SUPPORTS_32BIT_KERNEL
-	select SYS_SUPPORTS_64BIT_KERNEL if EXPERIMENTAL
+	select SYS_SUPPORTS_64BIT_KERNEL if BROKEN
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 	select GENERIC_HARDIRQS_NO__DO_IRQ
@@ -1559,6 +1559,7 @@ config MIPS_MT_SMP
 	select CPU_MIPSR2_IRQ_VI
 	select CPU_MIPSR2_SRS
 	select MIPS_MT
+	select NR_CPUS_DEFAULT_2
 	select SMP
 	select SYS_SUPPORTS_SMP
 	help
@@ -1573,7 +1574,6 @@ config MIPS_MT_SMTC
 	select CPU_MIPSR2_IRQ_VI
 	select CPU_MIPSR2_SRS
 	select MIPS_MT
-	select NR_CPUS_DEFAULT_2
 	select NR_CPUS_DEFAULT_8
 	select SMP
 	select SYS_SUPPORTS_SMP
......
@@ -191,6 +191,8 @@ void sp_work_handle_request(void)
 	struct mtsp_syscall_generic generic;
 	struct mtsp_syscall_ret ret;
 	struct kspd_notifications *n;
+	unsigned long written;
+	mm_segment_t old_fs;
 	struct timeval tv;
 	struct timezone tz;
 	int cmd;
@@ -201,7 +203,11 @@ void sp_work_handle_request(void)
 	ret.retval = -1;
 
-	if (!rtlx_read(RTLX_CHANNEL_SYSIO, &sc, sizeof(struct mtsp_syscall), 0)) {
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+
+	if (!rtlx_read(RTLX_CHANNEL_SYSIO, &sc, sizeof(struct mtsp_syscall))) {
+		set_fs(old_fs);
 		printk(KERN_ERR "Expected request but nothing to read\n");
 		return;
 	}
@@ -209,7 +215,8 @@
 	size = sc.size;
 
 	if (size) {
-		if (!rtlx_read(RTLX_CHANNEL_SYSIO, &generic, size, 0)) {
+		if (!rtlx_read(RTLX_CHANNEL_SYSIO, &generic, size)) {
+			set_fs(old_fs);
 			printk(KERN_ERR "Expected request but nothing to read\n");
 			return;
 		}
@@ -282,8 +289,11 @@
 	if (vpe_getuid(SP_VPE))
 		sp_setfsuidgid( 0, 0);
 
-	if ((rtlx_write(RTLX_CHANNEL_SYSIO, &ret, sizeof(struct mtsp_syscall_ret), 0))
-	    < sizeof(struct mtsp_syscall_ret))
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+	written = rtlx_write(RTLX_CHANNEL_SYSIO, &ret, sizeof(ret));
+	set_fs(old_fs);
+
+	if (written < sizeof(ret))
 		printk("KSPD: sp_work_handle_request failed to send to SP\n");
 }
......
@@ -311,6 +311,8 @@ asmlinkage int sys32_sched_rr_get_interval(compat_pid_t pid,
 	return ret;
 }
 
+#ifdef CONFIG_SYSVIPC
+
 asmlinkage long
 sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
 {
@@ -368,6 +370,16 @@ sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
 	return err;
 }
 
+#else
+
+asmlinkage long
+sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
+{
+	return -ENOSYS;
+}
+
+#endif /* CONFIG_SYSVIPC */
+
 #ifdef CONFIG_MIPS32_N32
 asmlinkage long sysn32_semctl(int semid, int semnum, int cmd, u32 arg)
 {
......
@@ -49,8 +49,7 @@ LEAF(resume)
 #ifndef CONFIG_CPU_HAS_LLSC
 	sw	zero, ll_bit
 #endif
-	mfc0	t1, CP0_STATUS
-	sw	t1, THREAD_STATUS(a0)
+	mfc0	t2, CP0_STATUS
 	cpu_save_nonscratch a0
 	sw	ra, THREAD_REG31(a0)
@@ -60,8 +59,8 @@ LEAF(resume)
 	lw	t3, TASK_THREAD_INFO(a0)
 	lw	t0, TI_FLAGS(t3)
 	li	t1, _TIF_USEDFPU
-	and	t2, t0, t1
-	beqz	t2, 1f
+	and	t1, t0
+	beqz	t1, 1f
 	nor	t1, zero, t1
 	and	t0, t0, t1
@@ -74,10 +73,13 @@ LEAF(resume)
 	li	t1, ~ST0_CU1
 	and	t0, t0, t1
 	sw	t0, ST_OFF(t3)
+	/* clear thread_struct CU1 bit */
+	and	t2, t1
 
 	fpu_save_single a0, t0			# clobbers t0
 
 1:
+	sw	t2, THREAD_STATUS(a0)
 	/*
 	 * The order of restoring the registers takes care of the race
 	 * updating $28, $29 and kernelsp without disabling ints.
......
@@ -114,14 +114,6 @@ LEAF(_save_fp_context32)
  */
 LEAF(_restore_fp_context)
 	EX	lw t0, SC_FPC_CSR(a0)
-
-	/* Fail if the CSR has exceptions pending */
-	srl	t1, t0, 5
-	and	t1, t0
-	andi	t1, 0x1f << 7
-	bnez	t1, fault
-	 nop
-
 #ifdef CONFIG_64BIT
 	EX	ldc1 $f1, SC_FPREGS+8(a0)
 	EX	ldc1 $f3, SC_FPREGS+24(a0)
@@ -165,14 +157,6 @@ LEAF(_restore_fp_context)
 LEAF(_restore_fp_context32)
 	/* Restore an o32 sigcontext.  */
 	EX	lw t0, SC32_FPC_CSR(a0)
-
-	/* Fail if the CSR has exceptions pending */
-	srl	t1, t0, 5
-	and	t1, t0
-	andi	t1, 0x1f << 7
-	bnez	t1, fault
-	 nop
-
 	EX	ldc1 $f0, SC32_FPREGS+0(a0)
 	EX	ldc1 $f2, SC32_FPREGS+16(a0)
 	EX	ldc1 $f4, SC32_FPREGS+32(a0)
......
@@ -48,8 +48,7 @@
 #ifndef CONFIG_CPU_HAS_LLSC
 	sw	zero, ll_bit
 #endif
-	mfc0	t1, CP0_STATUS
-	LONG_S	t1, THREAD_STATUS(a0)
+	mfc0	t2, CP0_STATUS
 	cpu_save_nonscratch a0
 	LONG_S	ra, THREAD_REG31(a0)
@@ -59,8 +58,8 @@
 	PTR_L	t3, TASK_THREAD_INFO(a0)
 	LONG_L	t0, TI_FLAGS(t3)
 	li	t1, _TIF_USEDFPU
-	and	t2, t0, t1
-	beqz	t2, 1f
+	and	t1, t0
+	beqz	t1, 1f
 	nor	t1, zero, t1
 	and	t0, t0, t1
@@ -73,10 +72,13 @@
 	li	t1, ~ST0_CU1
 	and	t0, t0, t1
 	LONG_S	t0, ST_OFF(t3)
+	/* clear thread_struct CU1 bit */
+	and	t2, t1
 
 	fpu_save_double a0 t0 t1		# c0_status passed in t0
 						# clobbers t1
 1:
+	LONG_S	t2, THREAD_STATUS(a0)
 	/*
 	 * The order of restoring the registers takes care of the race
......
@@ -54,6 +54,7 @@ static struct chan_waitqueues {
 	wait_queue_head_t rt_queue;
 	wait_queue_head_t lx_queue;
 	atomic_t in_open;
+	struct mutex mutex;
 } channel_wqs[RTLX_CHANNELS];
 
 static struct irqaction irq;
@@ -146,7 +147,7 @@ static void stopping(int vpe)
 
 int rtlx_open(int index, int can_sleep)
 {
-	volatile struct rtlx_info **p;
+	struct rtlx_info **p;
 	struct rtlx_channel *chan;
 	enum rtlx_state state;
 	int ret = 0;
@@ -179,13 +180,24 @@ int rtlx_open(int index, int can_sleep)
 		}
 	}
 
+	smp_rmb();
 	if (*p == NULL) {
 		if (can_sleep) {
-			__wait_event_interruptible(channel_wqs[index].lx_queue,
-			                           *p != NULL,
-			                           ret);
-			if (ret)
+			DEFINE_WAIT(wait);
+
+			for (;;) {
+				prepare_to_wait(&channel_wqs[index].lx_queue, &wait, TASK_INTERRUPTIBLE);
+				smp_rmb();
+				if (*p != NULL)
+					break;
+				if (!signal_pending(current)) {
+					schedule();
+					continue;
+				}
+				ret = -ERESTARTSYS;
 				goto out_fail;
+			}
+			finish_wait(&channel_wqs[index].lx_queue, &wait);
 		} else {
 			printk(" *vpe_get_shared is NULL. "
 			       "Has an SP program been loaded?\n");
@@ -277,56 +289,52 @@ unsigned int rtlx_write_poll(int index)
 	return write_spacefree(chan->rt_read, chan->rt_write, chan->buffer_size);
 }
 
-static inline void copy_to(void *dst, void *src, size_t count, int user)
-{
-	if (user)
-		copy_to_user(dst, src, count);
-	else
-		memcpy(dst, src, count);
-}
-
-static inline void copy_from(void *dst, void *src, size_t count, int user)
-{
-	if (user)
-		copy_from_user(dst, src, count);
-	else
-		memcpy(dst, src, count);
-}
-
-ssize_t rtlx_read(int index, void *buff, size_t count, int user)
+ssize_t rtlx_read(int index, void __user *buff, size_t count)
 {
-	size_t fl = 0L;
+	size_t lx_write, fl = 0L;
 	struct rtlx_channel *lx;
+	unsigned long failed;
 
 	if (rtlx == NULL)
 		return -ENOSYS;
 
 	lx = &rtlx->channel[index];
 
+	mutex_lock(&channel_wqs[index].mutex);
+	smp_rmb();
+	lx_write = lx->lx_write;
+
 	/* find out how much in total */
 	count = min(count,
-		    (size_t)(lx->lx_write + lx->buffer_size - lx->lx_read)
+		    (size_t)(lx_write + lx->buffer_size - lx->lx_read)
 		    % lx->buffer_size);
 
 	/* then how much from the read pointer onwards */
-	fl = min( count, (size_t)lx->buffer_size - lx->lx_read);
+	fl = min(count, (size_t)lx->buffer_size - lx->lx_read);
 
-	copy_to(buff, &lx->lx_buffer[lx->lx_read], fl, user);
+	failed = copy_to_user(buff, lx->lx_buffer + lx->lx_read, fl);
+	if (failed)
+		goto out;
 
 	/* and if there is anything left at the beginning of the buffer */
-	if ( count - fl )
-		copy_to (buff + fl, lx->lx_buffer, count - fl, user);
+	if (count - fl)
+		failed = copy_to_user(buff + fl, lx->lx_buffer, count - fl);
 
-	/* update the index */
-	lx->lx_read += count;
-	lx->lx_read %= lx->buffer_size;
+out:
+	count -= failed;
+
+	smp_wmb();
+	lx->lx_read = (lx->lx_read + count) % lx->buffer_size;
+	smp_wmb();
+	mutex_unlock(&channel_wqs[index].mutex);
 
 	return count;
 }
 
-ssize_t rtlx_write(int index, void *buffer, size_t count, int user)
+ssize_t rtlx_write(int index, const void __user *buffer, size_t count)
 {
 	struct rtlx_channel *rt;
+	unsigned long failed;
+	size_t rt_read;
 	size_t fl;
 
 	if (rtlx == NULL)
@@ -334,24 +342,35 @@ ssize_t rtlx_write(int index, void *buffer, size_t count, int user)
 
 	rt = &rtlx->channel[index];
 
+	mutex_lock(&channel_wqs[index].mutex);
+	smp_rmb();
+	rt_read = rt->rt_read;
+
 	/* total number of bytes to copy */
 	count = min(count,
-		    (size_t)write_spacefree(rt->rt_read, rt->rt_write,
-					    rt->buffer_size));
+		    (size_t)write_spacefree(rt_read, rt->rt_write, rt->buffer_size));
 
 	/* first bit from write pointer to the end of the buffer, or count */
 	fl = min(count, (size_t) rt->buffer_size - rt->rt_write);
 
-	copy_from (&rt->rt_buffer[rt->rt_write], buffer, fl, user);
+	failed = copy_from_user(rt->rt_buffer + rt->rt_write, buffer, fl);
+	if (failed)
+		goto out;
 
 	/* if there's any left copy to the beginning of the buffer */
-	if( count - fl )
-		copy_from (rt->rt_buffer, buffer + fl, count - fl, user);
+	if (count - fl) {
+		failed = copy_from_user(rt->rt_buffer, buffer + fl, count - fl);
+	}
 
-	rt->rt_write += count;
-	rt->rt_write %= rt->buffer_size;
+out:
+	count -= failed;
 
-	return(count);
+	smp_wmb();
+	rt->rt_write = (rt->rt_write + count) % rt->buffer_size;
+	smp_wmb();
+	mutex_unlock(&channel_wqs[index].mutex);
+
+	return count;
 }
@@ -403,7 +422,7 @@ static ssize_t file_read(struct file *file, char __user * buffer, size_t count,
 		return 0;	// -EAGAIN makes cat whinge
 	}
 
-	return rtlx_read(minor, buffer, count, 1);
+	return rtlx_read(minor, buffer, count);
 }
 
 static ssize_t file_write(struct file *file, const char __user * buffer,
@@ -429,7 +448,7 @@ static ssize_t file_write(struct file *file, const char __user * buffer,
 		return ret;
 	}
 
-	return rtlx_write(minor, (void *)buffer, count, 1);
+	return rtlx_write(minor, buffer, count);
 }
 
 static const struct file_operations rtlx_fops = {
@@ -468,6 +487,7 @@ static int rtlx_module_init(void)
 		init_waitqueue_head(&channel_wqs[i].rt_queue);
 		init_waitqueue_head(&channel_wqs[i].lx_queue);
 		atomic_set(&channel_wqs[i].in_open, 0);
+		mutex_init(&channel_wqs[i].mutex);
 
 		dev = device_create(mt_class, NULL, MKDEV(major, i),
 				    "%s%d", module_name, i);
......
@@ -31,4 +31,7 @@ extern void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
  */
 extern int install_sigtramp(unsigned int __user *tramp, unsigned int syscall);
 
+/* Check and clear pending FPU exceptions in saved CSR */
+extern int fpcsr_pending(unsigned int __user *fpcsr);
+
 #endif	/* __SIGNAL_COMMON_H */
@@ -82,6 +82,7 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
 {
 	int err = 0;
 	int i;
+	unsigned int used_math;
 
 	err |= __put_user(regs->cp0_epc, &sc->sc_pc);
@@ -104,26 +105,53 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
 		err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
 	}
 
-	err |= __put_user(!!used_math(), &sc->sc_used_math);
+	used_math = !!used_math();
+	err |= __put_user(used_math, &sc->sc_used_math);
 
-	if (used_math()) {
+	if (used_math) {
 		/*
 		 * Save FPU state to signal context. Signal handler
 		 * will "inherit" current FPU state.
 		 */
-		preempt_disable();
-
-		if (!is_fpu_owner()) {
-			own_fpu();
-			restore_fp(current);
-		}
+		own_fpu(1);
+		enable_fp_in_kernel();
 		err |= save_fp_context(sc);
-		preempt_enable();
+		disable_fp_in_kernel();
 	}
 	return err;
 }
 
+int fpcsr_pending(unsigned int __user *fpcsr)
+{
+	int err, sig = 0;
+	unsigned int csr, enabled;
+
+	err = __get_user(csr, fpcsr);
+	enabled = FPU_CSR_UNI_X | ((csr & FPU_CSR_ALL_E) << 5);
+	/*
+	 * If the signal handler set some FPU exceptions, clear it and
+	 * send SIGFPE.
+	 */
+	if (csr & enabled) {
+		csr &= ~enabled;
+		err |= __put_user(csr, fpcsr);
+		sig = SIGFPE;
+	}
+	return err ?: sig;
+}
+
+static int
+check_and_restore_fp_context(struct sigcontext __user *sc)
+{
+	int err, sig;
+
+	err = sig = fpcsr_pending(&sc->sc_fpc_csr);
+	if (err > 0)
+		err = 0;
+	err |= restore_fp_context(sc);
+	return err ?: sig;
+}
+
 int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
 {
 	unsigned int used_math;
@@ -157,19 +185,18 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
 	err |= __get_user(used_math, &sc->sc_used_math);
 	conditional_used_math(used_math);
 
-	preempt_disable();
-
-	if (used_math()) {
+	if (used_math) {
 		/* restore fpu context if we have used it before */
-		own_fpu();
-		err |= restore_fp_context(sc);
+		own_fpu(0);
+		enable_fp_in_kernel();
+		if (!err)
+			err = check_and_restore_fp_context(sc);
+		disable_fp_in_kernel();
 	} else {
 		/* signal handler may have used FPU.  Give it up. */
-		lose_fpu();
+		lose_fpu(0);
 	}
 
-	preempt_enable();
-
 	return err;
 }
@@ -332,6 +359,7 @@ asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
 {
 	struct sigframe __user *frame;
 	sigset_t blocked;
+	int sig;
 
 	frame = (struct sigframe __user *) regs.regs[29];
 	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
@@ -345,8 +373,11 @@
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
 
-	if (restore_sigcontext(&regs, &frame->sf_sc))
+	sig = restore_sigcontext(&regs, &frame->sf_sc);
+	if (sig < 0)
 		goto badframe;
+	else if (sig)
+		force_sig(sig, current);
 
 	/*
 	 * Don't let your children do this ...
@@ -368,6 +399,7 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
 	struct rt_sigframe __user *frame;
 	sigset_t set;
 	stack_t st;
+	int sig;
 
 	frame = (struct rt_sigframe __user *) regs.regs[29];
 	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
@@ -381,8 +413,11 @@
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
 
-	if (restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext))
+	sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
+	if (sig < 0)
 		goto badframe;
+	else if (sig)
+		force_sig(sig, current);
 
 	if (__copy_from_user(&st, &frame->rs_uc.uc_stack, sizeof(st)))
 		goto badframe;
......
@@ -181,6 +181,7 @@ static int setup_sigcontext32(struct pt_regs *regs,
 {
 	int err = 0;
 	int i;
+	u32 used_math;
 
 	err |= __put_user(regs->cp0_epc, &sc->sc_pc);
@@ -200,26 +201,34 @@ static int setup_sigcontext32(struct pt_regs *regs,
 		err |= __put_user(mflo3(), &sc->sc_lo3);
 	}
 
-	err |= __put_user(!!used_math(), &sc->sc_used_math);
+	used_math = !!used_math();
+	err |= __put_user(used_math, &sc->sc_used_math);
 
-	if (used_math()) {
+	if (used_math) {
 		/*
 		 * Save FPU state to signal context. Signal handler
 		 * will "inherit" current FPU state.
 		 */
-		preempt_disable();
-
-		if (!is_fpu_owner()) {
-			own_fpu();
-			restore_fp(current);
-		}
+		own_fpu(1);
+		enable_fp_in_kernel();
 		err |= save_fp_context32(sc);
-		preempt_enable();
+		disable_fp_in_kernel();
 	}
 	return err;
 }
 
+static int
+check_and_restore_fp_context32(struct sigcontext32 __user *sc)
+{
+	int err, sig;
+
+	err = sig = fpcsr_pending(&sc->sc_fpc_csr);
+	if (err > 0)
+		err = 0;
+	err |= restore_fp_context32(sc);
+	return err ?: sig;
+}
+
 static int restore_sigcontext32(struct pt_regs *regs,
 				struct sigcontext32 __user *sc)
 {
@@ -250,19 +259,18 @@ static int restore_sigcontext32(struct pt_regs *regs,
 	err |= __get_user(used_math, &sc->sc_used_math);
 	conditional_used_math(used_math);
 
-	preempt_disable();
-
-	if (used_math()) {
+	if (used_math) {
 		/* restore fpu context if we have used it before */
-		own_fpu();
-		err |= restore_fp_context32(sc);
+		own_fpu(0);
+		enable_fp_in_kernel();
+		if (!err)
+			err = check_and_restore_fp_context32(sc);
+		disable_fp_in_kernel();
 	} else {
 		/* signal handler may have used FPU.  Give it up. */
-		lose_fpu();
+		lose_fpu(0);
 	}
 
-	preempt_enable();
-
 	return err;
 }
@@ -508,6 +516,7 @@ asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
 {
 	struct sigframe32 __user *frame;
 	sigset_t blocked;
+	int sig;
 
 	frame = (struct sigframe32 __user *) regs.regs[29];
 	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
@@ -521,8 +530,11 @@
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
 
-	if (restore_sigcontext32(&regs, &frame->sf_sc))
+	sig = restore_sigcontext32(&regs, &frame->sf_sc);
+	if (sig < 0)
 		goto badframe;
+	else if (sig)
+		force_sig(sig, current);
 
 	/*
 	 * Don't let your children do this ...
@@ -545,6 +557,7 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
 	sigset_t set;
 	stack_t st;
 	s32 sp;
+	int sig;
 
 	frame = (struct rt_sigframe32 __user *) regs.regs[29];
 	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
@@ -558,8 +571,11 @@
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
 
-	if (restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext))
+	sig = restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext);
+	if (sig < 0)
 		goto badframe;
+	else if (sig)
+		force_sig(sig, current);
 
 	/* The ucontext contains a stack32_t, so we must convert! */
 	if (__get_user(sp, &frame->rs_uc.uc_stack.ss_sp))
......
@@ -127,6 +127,7 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
 	sigset_t set;
 	stack_t st;
 	s32 sp;
+	int sig;
 
 	frame = (struct rt_sigframe_n32 __user *) regs.regs[29];
 	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
@@ -140,8 +141,11 @@
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
 
-	if (restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext))
+	sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
+	if (sig < 0)
 		goto badframe;
+	else if (sig)
+		force_sig(sig, current);
 
 	/* The ucontext contains a stack32_t, so we must convert! */
 	if (__get_user(sp, &frame->rs_uc.uc_stack.ss_sp))
......
@@ -610,16 +610,6 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
 	if (fcr31 & FPU_CSR_UNI_X) {
 		int sig;
 
-		preempt_disable();
-
-#ifdef CONFIG_PREEMPT
-		if (!is_fpu_owner()) {
-			/* We might lose fpu before disabling preempt... */
-			own_fpu();
-			BUG_ON(!used_math());
-			restore_fp(current);
-		}
-#endif
 		/*
 		 * Unimplemented operation exception.  If we've got the full
 		 * software emulator on-board, let's use it...
@@ -630,18 +620,12 @@
 		 * register operands before invoking the emulator, which seems
 		 * a bit extreme for what should be an infrequent event.
 		 */
-		save_fp(current);
 		/* Ensure 'resume' not overwrite saved fp context again. */
-		lose_fpu();
-
-		preempt_enable();
+		lose_fpu(1);
 
 		/* Run the emulator */
 		sig = fpu_emulator_cop1Handler (regs, &current->thread.fpu, 1);
 
-		preempt_disable();
-
-		own_fpu();	/* Using the FPU again.  */
 		/*
 		 * We can't allow the emulated instruction to leave any of
 		 * the cause bit set in $fcr31.
@@ -649,9 +633,7 @@
 		current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
 
 		/* Restore the hardware register state */
-		restore_fp(current);
-
-		preempt_enable();
+		own_fpu(1);	/* Using the FPU again.  */
 
 		/* If something went wrong, signal */
 		if (sig)
@@ -775,12 +757,11 @@ asmlinkage void do_cpu(struct pt_regs *regs)
 {
 	unsigned int cpid;
 
-	die_if_kernel("do_cpu invoked from kernel context!", regs);
-
 	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
 
 	switch (cpid) {
 	case 0:
+		die_if_kernel("do_cpu invoked from kernel context!", regs);
 		if (!cpu_has_llsc)
 			if (!simulate_llsc(regs))
 				return;
@@ -791,21 +772,30 @@
 		break;
 
 	case 1:
-		preempt_disable();
-
-		own_fpu();
-		if (used_math()) {	/* Using the FPU again.  */
-			restore_fp(current);
-		} else {			/* First time FPU user.  */
+		if (!test_thread_flag(TIF_ALLOW_FP_IN_KERNEL))
+			die_if_kernel("do_cpu invoked from kernel context!",
+				      regs);
+		if (used_math())	/* Using the FPU again.  */
+			own_fpu(1);
+		else {			/* First time FPU user.  */
 			init_fpu();
 			set_used_math();
 		}
 
-		if (cpu_has_fpu) {
-			preempt_enable();
+		if (raw_cpu_has_fpu) {
+			if (test_thread_flag(TIF_ALLOW_FP_IN_KERNEL)) {
+				local_irq_disable();
+				if (cpu_has_fpu)
+					regs->cp0_status |= ST0_CU1;
+				/*
+				 * We must return without enabling
+				 * interrupts to ensure keep FPU
+				 * ownership until resume.
+				 */
+				return;
+			}
 		} else {
 			int sig;
-			preempt_enable();
 			sig = fpu_emulator_cop1Handler(regs,
 						&current->thread.fpu, 0);
 			if (sig)
@@ -1259,26 +1249,26 @@ static inline void mips_srs_init(void)
 /*
  * This is used by native signal handling
  */
-asmlinkage int (*save_fp_context)(struct sigcontext *sc);
-asmlinkage int (*restore_fp_context)(struct sigcontext *sc);
+asmlinkage int (*save_fp_context)(struct sigcontext __user *sc);
+asmlinkage int (*restore_fp_context)(struct sigcontext __user *sc);
 
-extern asmlinkage int _save_fp_context(struct sigcontext *sc);
-extern asmlinkage int _restore_fp_context(struct sigcontext *sc);
+extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
+extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);
 
-extern asmlinkage int fpu_emulator_save_context(struct sigcontext *sc);
-extern asmlinkage int fpu_emulator_restore_context(struct sigcontext *sc);
+extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc);
+extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc);
 
 #ifdef CONFIG_SMP
-static int smp_save_fp_context(struct sigcontext *sc)
+static int smp_save_fp_context(struct sigcontext __user *sc)
 {
-	return cpu_has_fpu
+	return raw_cpu_has_fpu
 	       ? _save_fp_context(sc)
 	       : fpu_emulator_save_context(sc);
 }
 
-static int smp_restore_fp_context(struct sigcontext *sc)
+static int smp_restore_fp_context(struct sigcontext __user *sc)
 {
-	return cpu_has_fpu
+	return raw_cpu_has_fpu
 	       ? _restore_fp_context(sc)
 	       : fpu_emulator_restore_context(sc);
 }
@@ -1306,14 +1296,14 @@ static inline void signal_init(void)
 /*
  * This is used by 32-bit signal stuff on the 64-bit kernel
  */
-asmlinkage int (*save_fp_context32)(struct sigcontext32 *sc);
-asmlinkage int (*restore_fp_context32)(struct sigcontext32 *sc);
+asmlinkage int (*save_fp_context32)(struct sigcontext32 __user *sc);
+asmlinkage int (*restore_fp_context32)(struct sigcontext32 __user *sc);
 
-extern asmlinkage int _save_fp_context32(struct sigcontext32 *sc);
-extern asmlinkage int _restore_fp_context32(struct sigcontext32 *sc);
+extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc);
+extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc);
 
-extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 *sc);
-extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 *sc);
+extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 __user *sc);
+extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 __user *sc);
 
 static inline void signal32_init(void)
 {
......
@@ -51,7 +51,7 @@ void fpu_emulator_init_fpu(void)
  * with appropriate macros from uaccess.h
  */
 
-int fpu_emulator_save_context(struct sigcontext *sc)
+int fpu_emulator_save_context(struct sigcontext __user *sc)
 {
 	int i;
 	int err = 0;
@@ -65,7 +65,7 @@ int fpu_emulator_save_context(struct sigcontext *sc)
 	return err;
 }
 
-int fpu_emulator_restore_context(struct sigcontext *sc)
+int fpu_emulator_restore_context(struct sigcontext __user *sc)
 {
 	int i;
 	int err = 0;
@@ -84,7 +84,7 @@ int fpu_emulator_restore_context(struct sigcontext *sc)
  * This is the o32 version
  */
 
-int fpu_emulator_save_context32(struct sigcontext32 *sc)
+int fpu_emulator_save_context32(struct sigcontext32 __user *sc)
 {
 	int i;
 	int err = 0;
@@ -98,7 +98,7 @@ int fpu_emulator_save_context32(struct sigcontext32 *sc)
 	return err;
 }
 
-int fpu_emulator_restore_context32(struct sigcontext32 *sc)
+int fpu_emulator_restore_context32(struct sigcontext32 __user *sc)
 {
 	int i;
 	int err = 0;
......
@@ -11,9 +11,6 @@
  * March 2001: Ported from 2.0.34 by Liam Davies
  *
  */
-
-#define RTC_IO_EXTENT	0x10	/*Only really two ports, but... */
-
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/miscdevice.h>
@@ -32,8 +29,6 @@
 
 #include "lcd.h"
 
-static DEFINE_SPINLOCK(lcd_lock);
-
 static int lcd_ioctl(struct inode *inode, struct file *file,
 		     unsigned int cmd, unsigned long arg);
......
@@ -79,9 +79,9 @@ static __inline__ void atomic_add(int i, atomic_t * v)
 	} else {
 		unsigned long flags;
 
-		local_irq_save(flags);
+		raw_local_irq_save(flags);
 		v->counter += i;
-		local_irq_restore(flags);
+		raw_local_irq_restore(flags);
 	}
 }
@@ -124,9 +124,9 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
 	} else {
 		unsigned long flags;
 
-		local_irq_save(flags);
+		raw_local_irq_save(flags);
 		v->counter -= i;
-		local_irq_restore(flags);
+		raw_local_irq_restore(flags);
 	}
 }
@@ -173,11 +173,11 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 	} else {
 		unsigned long flags;
 
-		local_irq_save(flags);
+		raw_local_irq_save(flags);
 		result = v->counter;
 		result += i;
 		v->counter = result;
-		local_irq_restore(flags);
+		raw_local_irq_restore(flags);
 	}
 
 	smp_mb();
@@ -225,11 +225,11 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 	} else {
 		unsigned long flags;
 
-		local_irq_save(flags);
+		raw_local_irq_save(flags);
 		result = v->counter;
 		result -= i;
 		v->counter = result;
-		local_irq_restore(flags);
+		raw_local_irq_restore(flags);
 	}
 
 	smp_mb();
@@ -293,12 +293,12 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 	} else {
 		unsigned long flags;
 
-		local_irq_save(flags);
+		raw_local_irq_save(flags);
 		result = v->counter;
 		result -= i;
 		if (result >= 0)
 			v->counter = result;
-		local_irq_restore(flags);
+		raw_local_irq_restore(flags);
 	}
 
 	smp_mb();
@@ -454,9 +454,9 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
 	} else {
 		unsigned long flags;
 
-		local_irq_save(flags);
+		raw_local_irq_save(flags);
 		v->counter += i;
-		local_irq_restore(flags);
+		raw_local_irq_restore(flags);
 	}
 }
@@ -499,9 +499,9 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
 	} else {
 		unsigned long flags;
 
-		local_irq_save(flags);
+		raw_local_irq_save(flags);
 		v->counter -= i;
-		local_irq_restore(flags);
+		raw_local_irq_restore(flags);
 	}
 }
@@ -548,11 +548,11 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 	} else {
 		unsigned long flags;
 
-		local_irq_save(flags);
+		raw_local_irq_save(flags);
 		result = v->counter;
 		result += i;
 		v->counter = result;
-		local_irq_restore(flags);
+		raw_local_irq_restore(flags);
 	}
 
 	smp_mb();
@@ -600,11 +600,11 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 	} else {
 		unsigned long flags;
 
-		local_irq_save(flags);
+		raw_local_irq_save(flags);
 		result = v->counter;
 		result -= i;
 		v->counter = result;
-		local_irq_restore(flags);
+		raw_local_irq_restore(flags);
 	}
 
 	smp_mb();
@@ -668,12 +668,12 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 	} else {
 		unsigned long flags;
 
-		local_irq_save(flags);
+		raw_local_irq_save(flags);
 		result = v->counter;
 		result -= i;
 		if (result >= 0)
 			v->counter = result;
-		local_irq_restore(flags);
+		raw_local_irq_restore(flags);
 	}
 
 	smp_mb();
......
@@ -100,9 +100,9 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 		a += nr >> SZLONG_LOG;
 		mask = 1UL << bit;
-		local_irq_save(flags);
+		raw_local_irq_save(flags);
 		*a |= mask;
-		local_irq_restore(flags);
+		raw_local_irq_restore(flags);
 	}
 }
@@ -165,9 +165,9 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 		a += nr >> SZLONG_LOG;
 		mask = 1UL << bit;
-		local_irq_save(flags);
+		raw_local_irq_save(flags);
 		*a &= ~mask;
-		local_irq_restore(flags);
+		raw_local_irq_restore(flags);
 	}
 }
@@ -220,9 +220,9 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 		a += nr >> SZLONG_LOG;
 		mask = 1UL << bit;
-		local_irq_save(flags);
+		raw_local_irq_save(flags);
 		*a ^= mask;
-		local_irq_restore(flags);
+		raw_local_irq_restore(flags);
 	}
 }
@@ -287,10 +287,10 @@ static inline int test_and_set_bit(unsigned long nr,
 		a += nr >> SZLONG_LOG;
 		mask = 1UL << bit;
-		local_irq_save(flags);
+		raw_local_irq_save(flags);
 		retval = (mask & *a) != 0;
 		*a |= mask;
-		local_irq_restore(flags);
+		raw_local_irq_restore(flags);
 
 		return retval;
 	}
@@ -381,10 +381,10 @@ static inline int test_and_clear_bit(unsigned long nr,
 		a += nr >> SZLONG_LOG;
 		mask = 1UL << bit;
-		local_irq_save(flags);
+		raw_local_irq_save(flags);
 		retval = (mask & *a) != 0;
 		*a &= ~mask;
-		local_irq_restore(flags);
+		raw_local_irq_restore(flags);
 
 		return retval;
 	}
@@ -452,10 +452,10 @@ static inline int test_and_change_bit(unsigned long nr,
 		a += nr >> SZLONG_LOG;
 		mask = 1UL << bit;
-		local_irq_save(flags);
+		raw_local_irq_save(flags);
 		retval = (mask & *a) != 0;
 		*a ^= mask;
-		local_irq_restore(flags);
+		raw_local_irq_restore(flags);
 
 		return retval;
 	}
......
@@ -40,6 +40,9 @@
 #endif
 #ifndef cpu_has_fpu
 #define cpu_has_fpu		(current_cpu_data.options & MIPS_CPU_FPU)
+#define raw_cpu_has_fpu		(raw_current_cpu_data.options & MIPS_CPU_FPU)
+#else
+#define raw_cpu_has_fpu		cpu_has_fpu
 #endif
 #ifndef cpu_has_32fpr
 #define cpu_has_32fpr		(cpu_data[0].options & MIPS_CPU_32FPR)
......
@@ -87,6 +87,7 @@ struct cpuinfo_mips {
 extern struct cpuinfo_mips cpu_data[];
 #define current_cpu_data cpu_data[smp_processor_id()]
+#define raw_current_cpu_data cpu_data[raw_smp_processor_id()]
 
 extern void cpu_probe(void);
 extern void cpu_report(void);
......
@@ -27,11 +27,11 @@
 struct sigcontext;
 struct sigcontext32;
 
-extern asmlinkage int (*save_fp_context)(struct sigcontext *sc);
-extern asmlinkage int (*restore_fp_context)(struct sigcontext *sc);
+extern asmlinkage int (*save_fp_context)(struct sigcontext __user *sc);
+extern asmlinkage int (*restore_fp_context)(struct sigcontext __user *sc);
 
-extern asmlinkage int (*save_fp_context32)(struct sigcontext32 *sc);
-extern asmlinkage int (*restore_fp_context32)(struct sigcontext32 *sc);
+extern asmlinkage int (*save_fp_context32)(struct sigcontext32 __user *sc);
+extern asmlinkage int (*restore_fp_context32)(struct sigcontext32 __user *sc);
 
 extern void fpu_emulator_init_fpu(void);
 extern void _init_fpu(void);
@@ -68,6 +68,8 @@ do { \
 	/* We don't care about the c0 hazard here  */ \
 } while (0)
 
+#define __fpu_enabled()	(read_c0_status() & ST0_CU1)
+
 #define enable_fpu() \
 do { \
 	if (cpu_has_fpu) \
@@ -93,31 +95,47 @@ static inline int is_fpu_owner(void)
 	return cpu_has_fpu && __is_fpu_owner();
 }
 
-static inline void own_fpu(void)
+static inline void __own_fpu(void)
 {
-	if (cpu_has_fpu) {
 	__enable_fpu();
 	KSTK_STATUS(current) |= ST0_CU1;
 	set_thread_flag(TIF_USEDFPU);
+}
+
+static inline void own_fpu(int restore)
+{
+	preempt_disable();
+	if (cpu_has_fpu && !__is_fpu_owner()) {
+		__own_fpu();
+		if (restore)
+			_restore_fp(current);
 	}
+	preempt_enable();
 }
 
-static inline void lose_fpu(void)
+static inline void lose_fpu(int save)
 {
-	if (cpu_has_fpu) {
+	preempt_disable();
+	if (is_fpu_owner()) {
+		if (save)
+			_save_fp(current);
 		KSTK_STATUS(current) &= ~ST0_CU1;
 		clear_thread_flag(TIF_USEDFPU);
 		__disable_fpu();
 	}
+	preempt_enable();
 }
 
 static inline void init_fpu(void)
 {
+	preempt_disable();
 	if (cpu_has_fpu) {
+		__own_fpu();
 		_init_fpu();
 	} else {
 		fpu_emulator_init_fpu();
 	}
+	preempt_enable();
 }
 
 static inline void save_fp(struct task_struct *tsk)
@@ -144,4 +162,18 @@ static inline fpureg_t *get_fpu_regs(struct task_struct *tsk)
 	return tsk->thread.fpu.fpr;
 }
 
+static inline void enable_fp_in_kernel(void)
+{
+	set_thread_flag(TIF_ALLOW_FP_IN_KERNEL);
+	/* make sure CU1 and FPU ownership are consistent */
+	if (!__is_fpu_owner() && __fpu_enabled())
+		__disable_fpu();
+}
+
+static inline void disable_fp_in_kernel(void)
+{
+	BUG_ON(!__is_fpu_owner() && __fpu_enabled());
+	clear_thread_flag(TIF_ALLOW_FP_IN_KERNEL);
+}
+
 #endif /* _ASM_FPU_H */
@@ -18,7 +18,8 @@
 
 struct device;
 
-static dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size)
+static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
+	size_t size)
 {
 	dma_addr_t pa = dev_to_baddr(dev, virt_to_phys(addr));
 
@@ -37,7 +38,7 @@ static unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
 	return dma_addr & (0xffUL << 56);
 }
 
-static void plat_unmap_dma_mem(dma_addr_t dma_addr)
+static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
 {
 }
......
@@ -26,7 +26,8 @@ struct device;
 
 #define RAM_OFFSET_MASK	0x3fffffffUL
 
-static dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size)
+static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
+	size_t size)
 {
 	dma_addr_t pa = virt_to_phys(addr) & RAM_OFFSET_MASK;
 
@@ -59,7 +60,7 @@ static unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
 	return addr;
 }
 
-static void plat_unmap_dma_mem(dma_addr_t dma_addr)
+static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
 {
 }
......
@@ -23,8 +23,8 @@
 
 extern int rtlx_open(int index, int can_sleep);
 extern int rtlx_release(int index);
-extern ssize_t rtlx_read(int index, void *buff, size_t count, int user);
-extern ssize_t rtlx_write(int index, void *buffer, size_t count, int user);
+extern ssize_t rtlx_read(int index, void __user *buff, size_t count);
+extern ssize_t rtlx_write(int index, const void __user *buffer, size_t count);
 extern unsigned int rtlx_read_poll(int index, int can_sleep);
 extern unsigned int rtlx_write_poll(int index);
......
@@ -121,10 +121,10 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 	} else {
 		unsigned long flags;
 
-		local_irq_save(flags);
+		raw_local_irq_save(flags);
 		retval = *m;
 		*m = val;
-		local_irq_restore(flags);	/* implies memory barrier  */
+		raw_local_irq_restore(flags);	/* implies memory barrier  */
 	}
 
 	smp_mb();
@@ -169,10 +169,10 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
 	} else {
 		unsigned long flags;
 
-		local_irq_save(flags);
+		raw_local_irq_save(flags);
 		retval = *m;
 		*m = val;
-		local_irq_restore(flags);	/* implies memory barrier  */
+		raw_local_irq_restore(flags);	/* implies memory barrier  */
 	}
 
 	smp_mb();
@@ -250,11 +250,11 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
 	} else {
 		unsigned long flags;
 
-		local_irq_save(flags);
+		raw_local_irq_save(flags);
 		retval = *m;
 		if (retval == old)
 			*m = new;
-		local_irq_restore(flags);	/* implies memory barrier  */
+		raw_local_irq_restore(flags);	/* implies memory barrier  */
 	}
 
 	smp_mb();
@@ -304,11 +304,11 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
 	} else {
 		unsigned long flags;
 
-		local_irq_save(flags);
+		raw_local_irq_save(flags);
 		retval = *m;
 		if (retval == old)
 			*m = new;
-		local_irq_restore(flags);	/* implies memory barrier  */
+		raw_local_irq_restore(flags);	/* implies memory barrier  */
 	}
 
 	smp_mb();
......
@@ -119,6 +119,7 @@ register struct thread_info *__current_thread_info __asm__("$28");
 #define TIF_POLLING_NRFLAG	17	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE		18
 #define TIF_FREEZE		19
+#define TIF_ALLOW_FP_IN_KERNEL	20
 #define TIF_SYSCALL_TRACE	31	/* syscall trace active */
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
......