Commit d99cf715, authored by Adrian Bunk, committed by Linus Torvalds

[PATCH] xtensa: replace 'extern inline' with 'static inline'

"extern inline" doesn't make sense.
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Chris Zankel <chris@zankel.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Parent 7ef93905
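The one-line rationale deserves a short gloss. Under the GNU89 inline semantics that kernels of this era were built with, "extern inline" means "inline every call, and never emit an out-of-line body for this translation unit": any call the compiler declines to inline (at -O0, or through a function pointer) resolves to an external symbol that must be defined elsewhere, or the link fails. "static inline" instead lets each translation unit emit its own file-local copy when one is needed. A minimal sketch of the failure mode, with hypothetical names (twice, header.h) that are not from this patch:

/* header.h -- hypothetical example, compiled as gcc -std=gnu89 */
extern inline int twice(int x)
{
	return x + x;		/* inlined where possible; no out-of-line
				   body is ever emitted for this TU */
}

/* caller.c */
#include "header.h"

int (*fp)(int) = twice;		/* taking the address forces an
				   out-of-line, external reference */

int main(void)
{
	return fp(21);		/* links only if some other .c file also
				   provides a non-inline definition of
				   twice(); here none does, so the build
				   fails with "undefined reference to
				   `twice'" */
}

With "static inline" the compiler emits a file-local copy of twice() on demand and the program links in every build mode, at worst duplicating a few bytes of code per translation unit. That is the whole substance of the substitutions below.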
@@ -66,7 +66,7 @@ typedef struct { volatile int counter; } atomic_t;
  *
  * Atomically adds @i to @v.
  */
-extern __inline__ void atomic_add(int i, atomic_t * v)
+static inline void atomic_add(int i, atomic_t * v)
 {
 	unsigned int vval;
@@ -90,7 +90,7 @@ extern __inline__ void atomic_add(int i, atomic_t * v)
  *
  * Atomically subtracts @i from @v.
  */
-extern __inline__ void atomic_sub(int i, atomic_t *v)
+static inline void atomic_sub(int i, atomic_t *v)
 {
 	unsigned int vval;
@@ -111,7 +111,7 @@ extern __inline__ void atomic_sub(int i, atomic_t *v)
  * We use atomic_{add|sub}_return to define other functions.
  */
-extern __inline__ int atomic_add_return(int i, atomic_t * v)
+static inline int atomic_add_return(int i, atomic_t * v)
 {
 	unsigned int vval;
@@ -130,7 +130,7 @@ extern __inline__ int atomic_add_return(int i, atomic_t * v)
 	return vval;
 }

-extern __inline__ int atomic_sub_return(int i, atomic_t * v)
+static inline int atomic_sub_return(int i, atomic_t * v)
 {
 	unsigned int vval;
@@ -224,7 +224,7 @@ extern __inline__ int atomic_sub_return(int i, atomic_t * v)
 #define atomic_add_negative(i,v) (atomic_add_return((i),(v)) < 0)

-extern __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
 	unsigned int all_f = -1;
 	unsigned int vval;
@@ -243,7 +243,7 @@ extern __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
 	);
 }

-extern __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 {
 	unsigned int vval;
...
@@ -47,14 +47,14 @@ asmlinkage unsigned int csum_partial_copy_generic( const char *src, char *dst, i
  * If you use these functions directly please don't forget the
  * verify_area().
  */
-extern __inline__
+static inline
 unsigned int csum_partial_copy_nocheck ( const char *src, char *dst,
 					int len, int sum)
 {
 	return csum_partial_copy_generic ( src, dst, len, sum, NULL, NULL);
 }

-extern __inline__
+static inline
 unsigned int csum_partial_copy_from_user ( const char *src, char *dst,
 					int len, int sum, int *err_ptr)
 {
...
@@ -18,7 +18,7 @@
 extern unsigned long loops_per_jiffy;

-extern __inline__ void __delay(unsigned long loops)
+static inline void __delay(unsigned long loops)
 {
 	/* 2 cycles per loop. */
 	__asm__ __volatile__ ("1: addi %0, %0, -2; bgeui %0, 2, 1b"
...
@@ -41,12 +41,12 @@ static inline unsigned int _swapl (unsigned int v)
  * These are trivial on the 1:1 Linux/Xtensa mapping
  */
-extern inline unsigned long virt_to_phys(volatile void * address)
+static inline unsigned long virt_to_phys(volatile void * address)
 {
 	return PHYSADDR((unsigned long)address);
 }

-extern inline void * phys_to_virt(unsigned long address)
+static inline void * phys_to_virt(unsigned long address)
 {
 	return (void*) CACHED_ADDR(address);
 }
@@ -55,12 +55,12 @@ extern inline void * phys_to_virt(unsigned long address)
  * IO bus memory addresses are also 1:1 with the physical address
  */
-extern inline unsigned long virt_to_bus(volatile void * address)
+static inline unsigned long virt_to_bus(volatile void * address)
 {
 	return PHYSADDR((unsigned long)address);
 }

-extern inline void * bus_to_virt (unsigned long address)
+static inline void * bus_to_virt (unsigned long address)
 {
 	return (void *) CACHED_ADDR(address);
 }
@@ -69,17 +69,17 @@ extern inline void * bus_to_virt (unsigned long address)
  * Change "struct page" to physical address.
  */
-extern inline void *ioremap(unsigned long offset, unsigned long size)
+static inline void *ioremap(unsigned long offset, unsigned long size)
 {
 	return (void *) CACHED_ADDR_IO(offset);
 }

-extern inline void *ioremap_nocache(unsigned long offset, unsigned long size)
+static inline void *ioremap_nocache(unsigned long offset, unsigned long size)
 {
 	return (void *) BYPASS_ADDR_IO(offset);
 }

-extern inline void iounmap(void *addr)
+static inline void iounmap(void *addr)
 {
 }
...
@@ -199,13 +199,13 @@ extern pgd_t *current_pgd;
 #define ASID_FIRST_VERSION \
 	((unsigned long)(~ASID_VERSION_MASK) + 1 + ASID_FIRST_NONRESERVED)

-extern inline void set_rasid_register (unsigned long val)
+static inline void set_rasid_register (unsigned long val)
 {
 	__asm__ __volatile__ (" wsr %0, "__stringify(RASID)"\n\t"
 			      " isync\n" : : "a" (val));
 }

-extern inline unsigned long get_rasid_register (void)
+static inline unsigned long get_rasid_register (void)
 {
 	unsigned long tmp;
 	__asm__ __volatile__ (" rsr %0, "__stringify(RASID)"\n\t" : "=a" (tmp));
@@ -215,7 +215,7 @@ extern inline unsigned long get_rasid_register (void)
 #if ((XCHAL_MMU_ASID_INVALID == 0) && (XCHAL_MMU_ASID_KERNEL == 1))

-extern inline void
+static inline void
 get_new_mmu_context(struct mm_struct *mm, unsigned long asid)
 {
 	extern void flush_tlb_all(void);
@@ -234,7 +234,7 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long asid)
 /* XCHAL_MMU_ASID_INVALID == 0 and XCHAL_MMU_ASID_KERNEL ==1 are
    really the best, but if you insist... */

-extern inline int validate_asid (unsigned long asid)
+static inline int validate_asid (unsigned long asid)
 {
 	switch (asid) {
 	case XCHAL_MMU_ASID_INVALID:
@@ -247,7 +247,7 @@ extern inline int validate_asid (unsigned long asid)
 	return 1; /* valid */
 }

-extern inline void
+static inline void
 get_new_mmu_context(struct mm_struct *mm, unsigned long asid)
 {
 	extern void flush_tlb_all(void);
@@ -274,14 +274,14 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long asid)
  * instance.
  */
-extern inline int
+static inline int
 init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 	mm->context = NO_CONTEXT;
 	return 0;
 }

-extern inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			     struct task_struct *tsk)
 {
 	unsigned long asid = asid_cache;
@@ -301,7 +301,7 @@ extern inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
  * Destroy context related info for an mm_struct that is about
  * to be put to rest.
  */
-extern inline void destroy_context(struct mm_struct *mm)
+static inline void destroy_context(struct mm_struct *mm)
 {
 	/* Nothing to do. */
 }
@@ -310,7 +310,7 @@ extern inline void destroy_context(struct mm_struct *mm)
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
-extern inline void
+static inline void
 activate_mm(struct mm_struct *prev, struct mm_struct *next)
 {
 	/* Unconditionally get a new ASID. */
...
@@ -55,7 +55,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
  * Pure 2^n version of get_order
  */
-extern __inline__ int get_order(unsigned long size)
+static inline int get_order(unsigned long size)
 {
 	int order;
 #ifndef XCHAL_HAVE_NSU
...
@@ -22,12 +22,12 @@
 extern struct pci_controller* pcibios_alloc_controller(void);

-extern inline void pcibios_set_master(struct pci_dev *dev)
+static inline void pcibios_set_master(struct pci_dev *dev)
 {
 	/* No special bus mastering setup handling */
 }

-extern inline void pcibios_penalize_isa_irq(int irq)
+static inline void pcibios_penalize_isa_irq(int irq)
 {
 	/* We don't do dynamic PCI IRQ allocation */
 }
...
@@ -260,7 +260,7 @@ static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_RW; return pt
 #define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
 #define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)

-extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
 }
@@ -278,14 +278,14 @@ static inline void update_pte(pte_t *ptep, pte_t pteval)
 #endif
 }

-extern inline void
+static inline void
 set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval)
 {
 	update_pte(ptep, pteval);
 }

-extern inline void
+static inline void
 set_pmd(pmd_t *pmdp, pmd_t pmdval)
 {
 	*pmdp = pmdval;
...
@@ -47,7 +47,7 @@ struct semaphore {
 #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
 #define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)

-extern inline void sema_init (struct semaphore *sem, int val)
+static inline void sema_init (struct semaphore *sem, int val)
 {
 /*
  *	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
@@ -79,7 +79,7 @@ asmlinkage void __up(struct semaphore * sem);
 extern spinlock_t semaphore_wake_lock;

-extern __inline__ void down(struct semaphore * sem)
+static inline void down(struct semaphore * sem)
 {
 #if WAITQUEUE_DEBUG
 	CHECK_MAGIC(sem->__magic);
@@ -89,7 +89,7 @@ extern __inline__ void down(struct semaphore * sem)
 		__down(sem);
 }

-extern __inline__ int down_interruptible(struct semaphore * sem)
+static inline int down_interruptible(struct semaphore * sem)
 {
 	int ret = 0;
 #if WAITQUEUE_DEBUG
@@ -101,7 +101,7 @@ extern __inline__ int down_interruptible(struct semaphore * sem)
 	return ret;
 }

-extern __inline__ int down_trylock(struct semaphore * sem)
+static inline int down_trylock(struct semaphore * sem)
 {
 	int ret = 0;
 #if WAITQUEUE_DEBUG
@@ -117,7 +117,7 @@ extern __inline__ int down_trylock(struct semaphore * sem)
  * Note! This is subtle. We jump to wake people up only if
  * the semaphore was negative (== somebody was waiting on it).
  */
-extern __inline__ void up(struct semaphore * sem)
+static inline void up(struct semaphore * sem)
 {
 #if WAITQUEUE_DEBUG
 	CHECK_MAGIC(sem->__magic);
...
@@ -16,7 +16,7 @@
 #define _XTENSA_STRING_H

 #define __HAVE_ARCH_STRCPY
-extern __inline__ char *strcpy(char *__dest, const char *__src)
+static inline char *strcpy(char *__dest, const char *__src)
 {
 	register char *__xdest = __dest;
 	unsigned long __dummy;
@@ -35,7 +35,7 @@ extern __inline__ char *strcpy(char *__dest, const char *__src)
 }

 #define __HAVE_ARCH_STRNCPY
-extern __inline__ char *strncpy(char *__dest, const char *__src, size_t __n)
+static inline char *strncpy(char *__dest, const char *__src, size_t __n)
 {
 	register char *__xdest = __dest;
 	unsigned long __dummy;
@@ -60,7 +60,7 @@ extern __inline__ char *strncpy(char *__dest, const char *__src, size_t __n)
 }

 #define __HAVE_ARCH_STRCMP
-extern __inline__ int strcmp(const char *__cs, const char *__ct)
+static inline int strcmp(const char *__cs, const char *__ct)
 {
 	register int __res;
 	unsigned long __dummy;
@@ -82,7 +82,7 @@ extern __inline__ int strcmp(const char *__cs, const char *__ct)
 }

 #define __HAVE_ARCH_STRNCMP
-extern __inline__ int strncmp(const char *__cs, const char *__ct, size_t __n)
+static inline int strncmp(const char *__cs, const char *__ct, size_t __n)
 {
 	register int __res;
 	unsigned long __dummy;
...
@@ -56,7 +56,7 @@ static inline int irqs_disabled(void)
 #define clear_cpenable() __clear_cpenable()

-extern __inline__ void __clear_cpenable(void)
+static inline void __clear_cpenable(void)
 {
 #if XCHAL_HAVE_CP
 	unsigned long i = 0;
@@ -64,7 +64,7 @@ extern __inline__ void __clear_cpenable(void)
 #endif
 }

-extern __inline__ void enable_coprocessor(int i)
+static inline void enable_coprocessor(int i)
 {
 #if XCHAL_HAVE_CP
 	int cp;
@@ -74,7 +74,7 @@ extern __inline__ void enable_coprocessor(int i)
 #endif
 }

-extern __inline__ void disable_coprocessor(int i)
+static inline void disable_coprocessor(int i)
 {
 #if XCHAL_HAVE_CP
 	int cp;
@@ -123,7 +123,7 @@ do { \
  * cmpxchg
  */
-extern __inline__ unsigned long
+static inline unsigned long
 __cmpxchg_u32(volatile int *p, int old, int new)
 {
 	__asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t"
@@ -173,7 +173,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
  * where no register reference will cause an overflow.
  */
-extern __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
+static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
 {
 	unsigned long tmp;
 	__asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t"
...
@@ -39,7 +39,7 @@ extern void flush_tlb_range(struct vm_area_struct*,unsigned long,unsigned long);
  * page-table pages.
  */
-extern inline void flush_tlb_pgtables(struct mm_struct *mm,
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
 				      unsigned long start, unsigned long end)
 {
 }
@@ -51,26 +51,26 @@ extern inline void flush_tlb_pgtables(struct mm_struct *mm,
 #define ITLB_PROBE_SUCCESS (1 << ITLB_WAYS_LOG2)
 #define DTLB_PROBE_SUCCESS (1 << DTLB_WAYS_LOG2)

-extern inline unsigned long itlb_probe(unsigned long addr)
+static inline unsigned long itlb_probe(unsigned long addr)
 {
 	unsigned long tmp;
 	__asm__ __volatile__("pitlb %0, %1\n\t" : "=a" (tmp) : "a" (addr));
 	return tmp;
 }

-extern inline unsigned long dtlb_probe(unsigned long addr)
+static inline unsigned long dtlb_probe(unsigned long addr)
 {
 	unsigned long tmp;
 	__asm__ __volatile__("pdtlb %0, %1\n\t" : "=a" (tmp) : "a" (addr));
 	return tmp;
 }

-extern inline void invalidate_itlb_entry (unsigned long probe)
+static inline void invalidate_itlb_entry (unsigned long probe)
 {
 	__asm__ __volatile__("iitlb %0; isync\n\t" : : "a" (probe));
 }

-extern inline void invalidate_dtlb_entry (unsigned long probe)
+static inline void invalidate_dtlb_entry (unsigned long probe)
 {
 	__asm__ __volatile__("idtlb %0; dsync\n\t" : : "a" (probe));
 }
@@ -80,68 +80,68 @@ extern inline void invalidate_dtlb_entry (unsigned long probe)
  * caller must follow up with an 'isync', which can be relatively
  * expensive on some Xtensa implementations.
 */
-extern inline void invalidate_itlb_entry_no_isync (unsigned entry)
+static inline void invalidate_itlb_entry_no_isync (unsigned entry)
 {
 	/* Caller must follow up with 'isync'. */
 	__asm__ __volatile__ ("iitlb %0\n" : : "a" (entry) );
 }

-extern inline void invalidate_dtlb_entry_no_isync (unsigned entry)
+static inline void invalidate_dtlb_entry_no_isync (unsigned entry)
 {
 	/* Caller must follow up with 'isync'. */
 	__asm__ __volatile__ ("idtlb %0\n" : : "a" (entry) );
 }

-extern inline void set_itlbcfg_register (unsigned long val)
+static inline void set_itlbcfg_register (unsigned long val)
 {
 	__asm__ __volatile__("wsr %0, "__stringify(ITLBCFG)"\n\t" "isync\n\t"
 			     : : "a" (val));
 }

-extern inline void set_dtlbcfg_register (unsigned long val)
+static inline void set_dtlbcfg_register (unsigned long val)
 {
 	__asm__ __volatile__("wsr %0, "__stringify(DTLBCFG)"; dsync\n\t"
 			     : : "a" (val));
 }

-extern inline void set_ptevaddr_register (unsigned long val)
+static inline void set_ptevaddr_register (unsigned long val)
 {
 	__asm__ __volatile__(" wsr %0, "__stringify(PTEVADDR)"; isync\n"
 			     : : "a" (val));
 }

-extern inline unsigned long read_ptevaddr_register (void)
+static inline unsigned long read_ptevaddr_register (void)
 {
 	unsigned long tmp;
 	__asm__ __volatile__("rsr %0, "__stringify(PTEVADDR)"\n\t" : "=a" (tmp));
 	return tmp;
 }

-extern inline void write_dtlb_entry (pte_t entry, int way)
+static inline void write_dtlb_entry (pte_t entry, int way)
 {
 	__asm__ __volatile__("wdtlb %1, %0; dsync\n\t"
 			     : : "r" (way), "r" (entry) );
 }

-extern inline void write_itlb_entry (pte_t entry, int way)
+static inline void write_itlb_entry (pte_t entry, int way)
 {
 	__asm__ __volatile__("witlb %1, %0; isync\n\t"
 			     : : "r" (way), "r" (entry) );
 }

-extern inline void invalidate_page_directory (void)
+static inline void invalidate_page_directory (void)
 {
 	invalidate_dtlb_entry (DTLB_WAY_PGTABLE);
 }

-extern inline void invalidate_itlb_mapping (unsigned address)
+static inline void invalidate_itlb_mapping (unsigned address)
 {
 	unsigned long tlb_entry;
 	while ((tlb_entry = itlb_probe (address)) & ITLB_PROBE_SUCCESS)
 		invalidate_itlb_entry (tlb_entry);
 }

-extern inline void invalidate_dtlb_mapping (unsigned address)
+static inline void invalidate_dtlb_mapping (unsigned address)
 {
 	unsigned long tlb_entry;
 	while ((tlb_entry = dtlb_probe (address)) & DTLB_PROBE_SUCCESS)
@@ -165,28 +165,28 @@ extern inline void invalidate_dtlb_mapping (unsigned address)
  * as[07..00] contain the asid
 */
-extern inline unsigned long read_dtlb_virtual (int way)
+static inline unsigned long read_dtlb_virtual (int way)
 {
 	unsigned long tmp;
 	__asm__ __volatile__("rdtlb0 %0, %1\n\t" : "=a" (tmp), "+a" (way));
 	return tmp;
 }

-extern inline unsigned long read_dtlb_translation (int way)
+static inline unsigned long read_dtlb_translation (int way)
 {
 	unsigned long tmp;
 	__asm__ __volatile__("rdtlb1 %0, %1\n\t" : "=a" (tmp), "+a" (way));
 	return tmp;
 }

-extern inline unsigned long read_itlb_virtual (int way)
+static inline unsigned long read_itlb_virtual (int way)
 {
 	unsigned long tmp;
 	__asm__ __volatile__("ritlb0 %0, %1\n\t" : "=a" (tmp), "+a" (way));
 	return tmp;
 }

-extern inline unsigned long read_itlb_translation (int way)
+static inline unsigned long read_itlb_translation (int way)
 {
 	unsigned long tmp;
 	__asm__ __volatile__("ritlb1 %0, %1\n\t" : "=a" (tmp), "+a" (way));
...
@@ -211,7 +211,7 @@
 #define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))
 #define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))

-extern inline int verify_area(int type, const void * addr, unsigned long size)
+static inline int verify_area(int type, const void * addr, unsigned long size)
 {
 	return access_ok(type,addr,size) ? 0 : -EFAULT;
 }
@@ -464,7 +464,7 @@ __generic_copy_from_user(void *to, const void *from, unsigned long n)
  * success.
  */
-extern inline unsigned long
+static inline unsigned long
 __xtensa_clear_user(void *addr, unsigned long size)
 {
 	if ( ! memset(addr, 0, size) )
@@ -472,7 +472,7 @@ __xtensa_clear_user(void *addr, unsigned long size)
 	return 0;
 }

-extern inline unsigned long
+static inline unsigned long
 clear_user(void *addr, unsigned long size)
 {
 	if (access_ok(VERIFY_WRITE, addr, size))
@@ -486,7 +486,7 @@ clear_user(void *addr, unsigned long size)
 extern long __strncpy_user(char *, const char *, long);
 #define __strncpy_from_user __strncpy_user

-extern inline long
+static inline long
 strncpy_from_user(char *dst, const char *src, long count)
 {
 	if (access_ok(VERIFY_READ, src, 1))
@@ -502,7 +502,7 @@ strncpy_from_user(char *dst, const char *src, long count)
 */
 extern long __strnlen_user(const char *, long);

-extern inline long strnlen_user(const char *str, long len)
+static inline long strnlen_user(const char *str, long len)
 {
 	unsigned long top = __kernel_ok ? ~0UL : TASK_SIZE - 1;
...