commit f0c39113 authored by Linus Torvalds

Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus

Pull MIPS update from Ralf Baechle:
 "To avoid unnecessary risk and work the preemption fixes are combined
  with some preparatory work that isn't strictly required.  So it's
  really just 3 fixes:

   - Get is_compat_task() to do the right thing while simplifying it.
     The unnecessary complexity hid a rarely striking bug which could be
     triggered by ext3/ext4 under certain circumstances.
   - Resolve a preemption issue in the irqflags.h functions for kernels
     built to support pre-MIPS32 / pre-MIPS64 Release 2 processors.
   - Fix the interrupt number of the MIPS Malta's CBUS UART."

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus:
  MIPS: Malta: Fix interupt number of CBUS UART.
  MIPS: Make irqflags.h functions preempt-safe for non-mipsr2 cpus
  MIPS: Remove irqflags.h dependency from bitops.h
  MIPS: bitops.h: Change use of 'unsigned short' to 'int'
  MIPS: compat: Delete now unused TIF_32BIT.
  MIPS: compat: Implement is_compat_task() by testing for 32-bit address space.
  MIPS: compat: Fix use of TIF_32BIT_ADDR vs _TIF_32BIT_ADDR
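
A note on the first fix above: under one of the 32-bit compat configurations the old TIF_32BIT alias ended up naming a bit mask (_TIF_32BIT_ADDR) where a bit number was expected, so test_thread_flag() could test the wrong bit; the fix in the diff drops the alias and tests TIF_32BIT_ADDR directly. The small standalone sketch below (plain user-space C with made-up flag values, not the kernel's real definitions) shows why confusing a bit number with its mask breaks a test_bit()-style check.

#include <stdio.h>

/* Illustrative values only; the real TIF_* constants live in the MIPS
 * thread_info.h changed below. */
#define TIF_32BIT_ADDR	3				/* a bit NUMBER */
#define _TIF_32BIT_ADDR	(1UL << TIF_32BIT_ADDR)		/* the corresponding MASK */

/* Behaves like test_thread_flag()/test_bit(): expects a bit number. */
static int test_flag(unsigned long flags, unsigned long nr)
{
	return (flags >> nr) & 1UL;
}

int main(void)
{
	unsigned long flags = _TIF_32BIT_ADDR;	/* "32-bit address space" set */

	/* Correct: pass the bit number -> prints 1. */
	printf("number: %d\n", test_flag(flags, TIF_32BIT_ADDR));

	/* Buggy: pass the mask (8) where a number is expected -> tests
	 * bit 8, which is not set, so this prints 0. */
	printf("mask:   %d\n", test_flag(flags, _TIF_32BIT_ADDR));
	return 0;
}
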
@@ -30,6 +30,7 @@
  * measurement, and debugging facilities.
  */

+#include <linux/irqflags.h>
 #include <asm/octeon/cvmx.h>
 #include <asm/octeon/cvmx-l2c.h>
 #include <asm/octeon/cvmx-spinlock.h>
...
@@ -14,7 +14,6 @@
 #endif

 #include <linux/compiler.h>
-#include <linux/irqflags.h>
 #include <linux/types.h>
 #include <asm/barrier.h>
 #include <asm/byteorder.h>		/* sigh ... */
@@ -44,6 +43,24 @@
 #define smp_mb__before_clear_bit()	smp_mb__before_llsc()
 #define smp_mb__after_clear_bit()	smp_llsc_mb()
+
+/*
+ * These are the "slower" versions of the functions and are in bitops.c.
+ * These functions call raw_local_irq_{save,restore}().
+ */
+void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
+void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
+void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
+int __mips_test_and_set_bit(unsigned long nr,
+		volatile unsigned long *addr);
+int __mips_test_and_set_bit_lock(unsigned long nr,
+		volatile unsigned long *addr);
+int __mips_test_and_clear_bit(unsigned long nr,
+		volatile unsigned long *addr);
+int __mips_test_and_change_bit(unsigned long nr,
+		volatile unsigned long *addr);
+
 /*
  * set_bit - Atomically set a bit in memory
  * @nr: the bit to set
@@ -57,7 +74,7 @@
 static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long temp;

 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -92,17 +109,8 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 			: "=&r" (temp), "+m" (*m)
 			: "ir" (1UL << bit));
 		} while (unlikely(!temp));
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		*a |= mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		__mips_set_bit(nr, addr);
 }
 /*
@@ -118,7 +126,7 @@
 static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long temp;

 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -153,17 +161,8 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 			: "=&r" (temp), "+m" (*m)
 			: "ir" (~(1UL << bit)));
 		} while (unlikely(!temp));
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		*a &= ~mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		__mips_clear_bit(nr, addr);
 }
 /*
@@ -191,7 +190,7 @@ static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *ad
  */
 static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;

 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
@@ -220,17 +219,8 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 			: "=&r" (temp), "+m" (*m)
 			: "ir" (1UL << bit));
 		} while (unlikely(!temp));
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		*a ^= mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		__mips_change_bit(nr, addr);
 }
 /*
@@ -244,7 +234,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 static inline int test_and_set_bit(unsigned long nr,
 	volatile unsigned long *addr)
 {
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long res;

 	smp_mb__before_llsc();
@@ -281,18 +271,8 @@ static inline int test_and_set_bit(unsigned long nr,
 		} while (unlikely(!res));

 		res = temp & (1UL << bit);
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		res = (mask & *a);
-		*a |= mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		res = __mips_test_and_set_bit(nr, addr);

 	smp_llsc_mb();
@@ -310,7 +290,7 @@ static inline int test_and_set_bit(unsigned long nr,
 static inline int test_and_set_bit_lock(unsigned long nr,
 	volatile unsigned long *addr)
 {
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long res;

 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -345,18 +325,8 @@ static inline int test_and_set_bit_lock(unsigned long nr,
 		} while (unlikely(!res));

 		res = temp & (1UL << bit);
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		res = (mask & *a);
-		*a |= mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		res = __mips_test_and_set_bit_lock(nr, addr);

 	smp_llsc_mb();
@@ -373,7 +343,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,
 static inline int test_and_clear_bit(unsigned long nr,
 	volatile unsigned long *addr)
 {
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long res;

 	smp_mb__before_llsc();
@@ -428,18 +398,8 @@ static inline int test_and_clear_bit(unsigned long nr,
 		} while (unlikely(!res));

 		res = temp & (1UL << bit);
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		res = (mask & *a);
-		*a &= ~mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		res = __mips_test_and_clear_bit(nr, addr);

 	smp_llsc_mb();
@@ -457,7 +417,7 @@ static inline int test_and_clear_bit(unsigned long nr,
 static inline int test_and_change_bit(unsigned long nr,
 	volatile unsigned long *addr)
 {
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long res;

 	smp_mb__before_llsc();
@@ -494,18 +454,8 @@ static inline int test_and_change_bit(unsigned long nr,
 		} while (unlikely(!res));

 		res = temp & (1UL << bit);
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		res = (mask & *a);
-		*a ^= mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		res = __mips_test_and_change_bit(nr, addr);

 	smp_llsc_mb();
...
@@ -290,7 +290,7 @@ struct compat_shmid64_ds {

 static inline int is_compat_task(void)
 {
-	return test_thread_flag(TIF_32BIT);
+	return test_thread_flag(TIF_32BIT_ADDR);
 }

 #endif /* _ASM_COMPAT_H */
@@ -15,6 +15,7 @@
 #include <linux/compiler.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
+#include <linux/irqflags.h>

 #include <asm/addrspace.h>
 #include <asm/bug.h>
...
@@ -16,83 +16,13 @@
 #include <linux/compiler.h>
 #include <asm/hazards.h>

-__asm__(
-	"	.macro	arch_local_irq_enable			\n"
-	"	.set	push					\n"
-	"	.set	reorder					\n"
-	"	.set	noat					\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	$1, $2, 1	# SMTC - clear TCStatus.IXMT	\n"
-	"	ori	$1, 0x400				\n"
-	"	xori	$1, 0x400				\n"
-	"	mtc0	$1, $2, 1				\n"
-#elif defined(CONFIG_CPU_MIPSR2)
-	"	ei						\n"
-#else
-	"	mfc0	$1,$12					\n"
-	"	ori	$1,0x1f					\n"
-	"	xori	$1,0x1e					\n"
-	"	mtc0	$1,$12					\n"
-#endif
-	"	irq_enable_hazard				\n"
-	"	.set	pop					\n"
-	"	.endm");
-
-extern void smtc_ipi_replay(void);
-
-static inline void arch_local_irq_enable(void)
-{
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC kernel needs to do a software replay of queued
-	 * IPIs, at the cost of call overhead on each local_irq_enable()
-	 */
-	smtc_ipi_replay();
-#endif
-	__asm__ __volatile__(
-		"arch_local_irq_enable"
-		: /* no outputs */
-		: /* no inputs */
-		: "memory");
-}
-
-/*
- * For cli() we have to insert nops to make sure that the new value
- * has actually arrived in the status register before the end of this
- * macro.
- * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
- * no nops at all.
- */
-/*
- * For TX49, operating only IE bit is not enough.
- *
- * If mfc0 $12 follows store and the mfc0 is last instruction of a
- * page and fetching the next instruction causes TLB miss, the result
- * of the mfc0 might wrongly contain EXL bit.
- *
- * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
- *
- * Workaround: mask EXL bit of the result or place a nop before mfc0.
- */
+#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
+
 __asm__(
 	"	.macro	arch_local_irq_disable\n"
 	"	.set	push					\n"
 	"	.set	noat					\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	$1, $2, 1				\n"
-	"	ori	$1, 0x400				\n"
-	"	.set	noreorder				\n"
-	"	mtc0	$1, $2, 1				\n"
-#elif defined(CONFIG_CPU_MIPSR2)
 	"	di						\n"
-#else
-	"	mfc0	$1,$12					\n"
-	"	ori	$1,0x1f					\n"
-	"	xori	$1,0x1f					\n"
-	"	.set	noreorder				\n"
-	"	mtc0	$1,$12					\n"
-#endif
 	"	irq_disable_hazard				\n"
 	"	.set	pop					\n"
 	"	.endm						\n");
@@ -106,46 +36,14 @@ static inline void arch_local_irq_disable(void)
 		: "memory");
 }

-__asm__(
-	"	.macro	arch_local_save_flags flags		\n"
-	"	.set	push					\n"
-	"	.set	reorder					\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	\\flags, $2, 1				\n"
-#else
-	"	mfc0	\\flags, $12				\n"
-#endif
-	"	.set	pop					\n"
-	"	.endm						\n");
-
-static inline unsigned long arch_local_save_flags(void)
-{
-	unsigned long flags;
-	asm volatile("arch_local_save_flags %0" : "=r" (flags));
-	return flags;
-}
-
 __asm__(
 	"	.macro	arch_local_irq_save result		\n"
 	"	.set	push					\n"
 	"	.set	reorder					\n"
 	"	.set	noat					\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	\\result, $2, 1				\n"
-	"	ori	$1, \\result, 0x400			\n"
-	"	.set	noreorder				\n"
-	"	mtc0	$1, $2, 1				\n"
-	"	andi	\\result, \\result, 0x400		\n"
-#elif defined(CONFIG_CPU_MIPSR2)
 	"	di	\\result				\n"
 	"	andi	\\result, 1				\n"
-#else
-	"	mfc0	\\result, $12				\n"
-	"	ori	$1, \\result, 0x1f			\n"
-	"	xori	$1, 0x1f				\n"
-	"	.set	noreorder				\n"
-	"	mtc0	$1, $12					\n"
-#endif
 	"	irq_disable_hazard				\n"
 	"	.set	pop					\n"
 	"	.endm						\n");
@@ -160,61 +58,37 @@ static inline unsigned long arch_local_irq_save(void)
 	return flags;
 }

 __asm__(
 	"	.macro	arch_local_irq_restore flags		\n"
 	"	.set	push					\n"
 	"	.set	noreorder				\n"
 	"	.set	noat					\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"mfc0	$1, $2, 1					\n"
-	"andi	\\flags, 0x400					\n"
-	"ori	$1, 0x400					\n"
-	"xori	$1, 0x400					\n"
-	"or	\\flags, $1					\n"
-	"mtc0	\\flags, $2, 1					\n"
-#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+#if defined(CONFIG_IRQ_CPU)
 	/*
 	 * Slow, but doesn't suffer from a relatively unlikely race
 	 * condition we're having since days 1.
 	 */
 	"	beqz	\\flags, 1f				\n"
 	"	di						\n"
 	"	ei						\n"
 	"1:							\n"
-#elif defined(CONFIG_CPU_MIPSR2)
+#else
 	/*
 	 * Fast, dangerous.  Life is fun, life is good.
 	 */
 	"	mfc0	$1, $12					\n"
 	"	ins	$1, \\flags, 0, 1			\n"
 	"	mtc0	$1, $12					\n"
-#else
-	"	mfc0	$1, $12					\n"
-	"	andi	\\flags, 1				\n"
-	"	ori	$1, 0x1f				\n"
-	"	xori	$1, 0x1f				\n"
-	"	or	\\flags, $1				\n"
-	"	mtc0	\\flags, $12				\n"
 #endif
 	"	irq_disable_hazard				\n"
 	"	.set	pop					\n"
 	"	.endm						\n");

 static inline void arch_local_irq_restore(unsigned long flags)
 {
 	unsigned long __tmp1;

-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC kernel needs to do a software replay of queued
-	 * IPIs, at the cost of branch and call overhead on each
-	 * local_irq_restore()
-	 */
-	if (unlikely(!(flags & 0x0400)))
-		smtc_ipi_replay();
-#endif
 	__asm__ __volatile__(
 		"arch_local_irq_restore\t%0"
 		: "=r" (__tmp1)
@@ -232,6 +106,75 @@ static inline void __arch_local_irq_restore(unsigned long flags)
 		: "0" (flags)
 		: "memory");
 }
+#else
+/* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
+void arch_local_irq_disable(void);
+unsigned long arch_local_irq_save(void);
+void arch_local_irq_restore(unsigned long flags);
+void __arch_local_irq_restore(unsigned long flags);
+#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
+
+__asm__(
+	"	.macro	arch_local_irq_enable			\n"
+	"	.set	push					\n"
+	"	.set	reorder					\n"
+	"	.set	noat					\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"	mfc0	$1, $2, 1	# SMTC - clear TCStatus.IXMT	\n"
+	"	ori	$1, 0x400				\n"
+	"	xori	$1, 0x400				\n"
+	"	mtc0	$1, $2, 1				\n"
+#elif defined(CONFIG_CPU_MIPSR2)
+	"	ei						\n"
+#else
+	"	mfc0	$1,$12					\n"
+	"	ori	$1,0x1f					\n"
+	"	xori	$1,0x1e					\n"
+	"	mtc0	$1,$12					\n"
+#endif
+	"	irq_enable_hazard				\n"
+	"	.set	pop					\n"
+	"	.endm");
+
+extern void smtc_ipi_replay(void);
+
+static inline void arch_local_irq_enable(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC kernel needs to do a software replay of queued
+	 * IPIs, at the cost of call overhead on each local_irq_enable()
+	 */
+	smtc_ipi_replay();
+#endif
+	__asm__ __volatile__(
+		"arch_local_irq_enable"
+		: /* no outputs */
+		: /* no inputs */
+		: "memory");
+}
+
+__asm__(
+	"	.macro	arch_local_save_flags flags		\n"
+	"	.set	push					\n"
+	"	.set	reorder					\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"	mfc0	\\flags, $2, 1				\n"
+#else
+	"	mfc0	\\flags, $12				\n"
+#endif
+	"	.set	pop					\n"
+	"	.endm						\n");
+
+static inline unsigned long arch_local_save_flags(void)
+{
+	unsigned long flags;
+	asm volatile("arch_local_save_flags %0" : "=r" (flags));
+	return flags;
+}
+

 static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
@@ -245,7 +188,7 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
 #endif
 }

-#endif
+#endif /* #ifndef __ASSEMBLY__ */

 /*
  * Do the CPU's IRQ-state tracing from assembly code.
...
@@ -112,12 +112,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
 #define TIF_LOAD_WATCH		25	/* If set, load watch registers */
 #define TIF_SYSCALL_TRACE	31	/* syscall trace active */

-#ifdef CONFIG_MIPS32_O32
-#define TIF_32BIT TIF_32BIT_REGS
-#elif defined(CONFIG_MIPS32_N32)
-#define TIF_32BIT _TIF_32BIT_ADDR
-#endif /* CONFIG_MIPS32_O32 */
-
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
...
@@ -2,8 +2,9 @@
 # Makefile for MIPS-specific library files..
 #

-lib-y	+= csum_partial.o delay.o memcpy.o memset.o \
-	   strlen_user.o strncpy_user.o strnlen_user.o uncached.o
+lib-y	+= bitops.o csum_partial.o delay.o memcpy.o memset.o \
+	   mips-atomic.o strlen_user.o strncpy_user.o \
+	   strnlen_user.o uncached.o

 obj-y	+= iomap.o
 obj-$(CONFIG_PCI)	+= iomap-pci.o
...
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994-1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/bitops.h>
#include <linux/irqflags.h>
#include <linux/export.h>

/**
 * __mips_set_bit - Atomically set a bit in memory. This is called by
 * set_bit() if it cannot find a faster solution.
 * @nr: the bit to set
 * @addr: the address to start counting from
 */
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *a = addr;
	unsigned bit = nr & SZLONG_MASK;
	unsigned long mask;
	unsigned long flags;

	a += nr >> SZLONG_LOG;
	mask = 1UL << bit;
	raw_local_irq_save(flags);
	*a |= mask;
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL(__mips_set_bit);

/**
 * __mips_clear_bit - Clears a bit in memory. This is called by clear_bit() if
 * it cannot find a faster solution.
 * @nr: Bit to clear
 * @addr: Address to start counting from
 */
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *a = addr;
	unsigned bit = nr & SZLONG_MASK;
	unsigned long mask;
	unsigned long flags;

	a += nr >> SZLONG_LOG;
	mask = 1UL << bit;
	raw_local_irq_save(flags);
	*a &= ~mask;
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL(__mips_clear_bit);

/**
 * __mips_change_bit - Toggle a bit in memory. This is called by change_bit()
 * if it cannot find a faster solution.
 * @nr: Bit to change
 * @addr: Address to start counting from
 */
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *a = addr;
	unsigned bit = nr & SZLONG_MASK;
	unsigned long mask;
	unsigned long flags;

	a += nr >> SZLONG_LOG;
	mask = 1UL << bit;
	raw_local_irq_save(flags);
	*a ^= mask;
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL(__mips_change_bit);

/**
 * __mips_test_and_set_bit - Set a bit and return its old value. This is
 * called by test_and_set_bit() if it cannot find a faster solution.
 * @nr: Bit to set
 * @addr: Address to count from
 */
int __mips_test_and_set_bit(unsigned long nr,
			    volatile unsigned long *addr)
{
	volatile unsigned long *a = addr;
	unsigned bit = nr & SZLONG_MASK;
	unsigned long mask;
	unsigned long flags;
	unsigned long res;

	a += nr >> SZLONG_LOG;
	mask = 1UL << bit;
	raw_local_irq_save(flags);
	res = (mask & *a);
	*a |= mask;
	raw_local_irq_restore(flags);
	return res;
}
EXPORT_SYMBOL(__mips_test_and_set_bit);

/**
 * __mips_test_and_set_bit_lock - Set a bit and return its old value. This is
 * called by test_and_set_bit_lock() if it cannot find a faster solution.
 * @nr: Bit to set
 * @addr: Address to count from
 */
int __mips_test_and_set_bit_lock(unsigned long nr,
				 volatile unsigned long *addr)
{
	volatile unsigned long *a = addr;
	unsigned bit = nr & SZLONG_MASK;
	unsigned long mask;
	unsigned long flags;
	unsigned long res;

	a += nr >> SZLONG_LOG;
	mask = 1UL << bit;
	raw_local_irq_save(flags);
	res = (mask & *a);
	*a |= mask;
	raw_local_irq_restore(flags);
	return res;
}
EXPORT_SYMBOL(__mips_test_and_set_bit_lock);

/**
 * __mips_test_and_clear_bit - Clear a bit and return its old value. This is
 * called by test_and_clear_bit() if it cannot find a faster solution.
 * @nr: Bit to clear
 * @addr: Address to count from
 */
int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *a = addr;
	unsigned bit = nr & SZLONG_MASK;
	unsigned long mask;
	unsigned long flags;
	unsigned long res;

	a += nr >> SZLONG_LOG;
	mask = 1UL << bit;
	raw_local_irq_save(flags);
	res = (mask & *a);
	*a &= ~mask;
	raw_local_irq_restore(flags);
	return res;
}
EXPORT_SYMBOL(__mips_test_and_clear_bit);

/**
 * __mips_test_and_change_bit - Change a bit and return its old value. This is
 * called by test_and_change_bit() if it cannot find a faster solution.
 * @nr: Bit to change
 * @addr: Address to count from
 */
int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *a = addr;
	unsigned bit = nr & SZLONG_MASK;
	unsigned long mask;
	unsigned long flags;
	unsigned long res;

	a += nr >> SZLONG_LOG;
	mask = 1UL << bit;
	raw_local_irq_save(flags);
	res = (mask & *a);
	*a ^= mask;
	raw_local_irq_restore(flags);
	return res;
}
EXPORT_SYMBOL(__mips_test_and_change_bit);

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#include <asm/irqflags.h>
#include <asm/hazards.h>
#include <linux/compiler.h>
#include <linux/preempt.h>
#include <linux/export.h>

#if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC)

/*
 * For cli() we have to insert nops to make sure that the new value
 * has actually arrived in the status register before the end of this
 * macro.
 * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
 * no nops at all.
 */
/*
 * For TX49, operating only IE bit is not enough.
 *
 * If mfc0 $12 follows store and the mfc0 is last instruction of a
 * page and fetching the next instruction causes TLB miss, the result
 * of the mfc0 might wrongly contain EXL bit.
 *
 * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
 *
 * Workaround: mask EXL bit of the result or place a nop before mfc0.
 */
__asm__(
	"	.macro	arch_local_irq_disable\n"
	"	.set	push					\n"
	"	.set	noat					\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"	mfc0	$1, $2, 1				\n"
	"	ori	$1, 0x400				\n"
	"	.set	noreorder				\n"
	"	mtc0	$1, $2, 1				\n"
#elif defined(CONFIG_CPU_MIPSR2)
	/* see irqflags.h for inline function */
#else
	"	mfc0	$1,$12					\n"
	"	ori	$1,0x1f					\n"
	"	xori	$1,0x1f					\n"
	"	.set	noreorder				\n"
	"	mtc0	$1,$12					\n"
#endif
	"	irq_disable_hazard				\n"
	"	.set	pop					\n"
	"	.endm						\n");

void arch_local_irq_disable(void)
{
	preempt_disable();
	__asm__ __volatile__(
		"arch_local_irq_disable"
		: /* no outputs */
		: /* no inputs */
		: "memory");
	preempt_enable();
}
EXPORT_SYMBOL(arch_local_irq_disable);

__asm__(
	"	.macro	arch_local_irq_save result		\n"
	"	.set	push					\n"
	"	.set	reorder					\n"
	"	.set	noat					\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"	mfc0	\\result, $2, 1				\n"
	"	ori	$1, \\result, 0x400			\n"
	"	.set	noreorder				\n"
	"	mtc0	$1, $2, 1				\n"
	"	andi	\\result, \\result, 0x400		\n"
#elif defined(CONFIG_CPU_MIPSR2)
	/* see irqflags.h for inline function */
#else
	"	mfc0	\\result, $12				\n"
	"	ori	$1, \\result, 0x1f			\n"
	"	xori	$1, 0x1f				\n"
	"	.set	noreorder				\n"
	"	mtc0	$1, $12					\n"
#endif
	"	irq_disable_hazard				\n"
	"	.set	pop					\n"
	"	.endm						\n");

unsigned long arch_local_irq_save(void)
{
	unsigned long flags;

	preempt_disable();
	asm volatile("arch_local_irq_save\t%0"
		     : "=r" (flags)
		     : /* no inputs */
		     : "memory");
	preempt_enable();
	return flags;
}
EXPORT_SYMBOL(arch_local_irq_save);

__asm__(
	"	.macro	arch_local_irq_restore flags		\n"
	"	.set	push					\n"
	"	.set	noreorder				\n"
	"	.set	noat					\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"mfc0	$1, $2, 1					\n"
	"andi	\\flags, 0x400					\n"
	"ori	$1, 0x400					\n"
	"xori	$1, 0x400					\n"
	"or	\\flags, $1					\n"
	"mtc0	\\flags, $2, 1					\n"
#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
	/* see irqflags.h for inline function */
#elif defined(CONFIG_CPU_MIPSR2)
	/* see irqflags.h for inline function */
#else
	"	mfc0	$1, $12					\n"
	"	andi	\\flags, 1				\n"
	"	ori	$1, 0x1f				\n"
	"	xori	$1, 0x1f				\n"
	"	or	\\flags, $1				\n"
	"	mtc0	\\flags, $12				\n"
#endif
	"	irq_disable_hazard				\n"
	"	.set	pop					\n"
	"	.endm						\n");

void arch_local_irq_restore(unsigned long flags)
{
	unsigned long __tmp1;

#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC kernel needs to do a software replay of queued
	 * IPIs, at the cost of branch and call overhead on each
	 * local_irq_restore()
	 */
	if (unlikely(!(flags & 0x0400)))
		smtc_ipi_replay();
#endif
	preempt_disable();
	__asm__ __volatile__(
		"arch_local_irq_restore\t%0"
		: "=r" (__tmp1)
		: "0" (flags)
		: "memory");
	preempt_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);

void __arch_local_irq_restore(unsigned long flags)
{
	unsigned long __tmp1;

	preempt_disable();
	__asm__ __volatile__(
		"arch_local_irq_restore\t%0"
		: "=r" (__tmp1)
		: "0" (flags)
		: "memory");
	preempt_enable();
}
EXPORT_SYMBOL(__arch_local_irq_restore);

#endif /* !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) */
@@ -29,6 +29,7 @@
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/physmap.h>
 #include <linux/platform_device.h>
+#include <asm/mips-boards/maltaint.h>
 #include <mtd/mtd-abi.h>

 #define SMC_PORT(base, int)	\
@@ -48,7 +49,7 @@ static struct plat_serial8250_port uart8250_data[] = {
 	SMC_PORT(0x2F8, 3),
 	{
 		.mapbase	= 0x1f000900,	/* The CBUS UART */
-		.irq		= MIPS_CPU_IRQ_BASE + 2,
+		.irq		= MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB2,
 		.uartclk	= 3686400,	/* Twice the usual clk! */
 		.iotype		= UPIO_MEM32,
 		.flags		= CBUS_UART_FLAGS,
...