Commit 9c0aa0f9 authored by Adrian Bunk, committed by Linus Torvalds

[PATCH] Replace extern inline with static inline in asm-x86_64/*

They should be identical in the kernel now, but this
makes it consistent with other code.
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Parent 47e5701e
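For context on the commit message above: under gcc's traditional gnu89 inline rules, which the kernel was built with at the time, "extern inline" and "static inline" only coincide when every call is actually inlined. The sketch below is illustrative only (a hypothetical userspace file, not part of the patch) and shows where the two spellings can diverge:

/*
 * Minimal sketch, assuming gcc's gnu89 inline semantics.
 * The function names are made up for illustration.
 */

/*
 * gnu89 "extern inline": the body is only an inlining hint and gcc
 * never emits an out-of-line copy.  If a call is not inlined (say at
 * -O0), the linker must find an external definition elsewhere,
 * otherwise the link fails with an undefined reference.
 */
extern inline int square_extern(int x)
{
	return x * x;
}

/*
 * "static inline": internal linkage.  If a call is not inlined, gcc
 * emits a file-local copy, so the program always links.  With
 * optimization enabled both forms inline to the same code, which is
 * why the patch can switch them purely for consistency.
 */
static inline int square_static(int x)
{
	return x * x;
}

int main(void)
{
	int a = square_extern(3);	/* may be an unresolved external at -O0 under gnu89 rules */
	int b = square_static(4);	/* always resolves */
	return a + b;
}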
@@ -191,7 +191,7 @@ static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
 /*
  * load one particular LDT into the current CPU
  */
-extern inline void load_LDT_nolock (mm_context_t *pc, int cpu)
+static inline void load_LDT_nolock (mm_context_t *pc, int cpu)
 {
 int count = pc->size;
...
@@ -76,7 +76,7 @@ extern void __this_fixmap_does_not_exist(void);
  * directly without translation, we catch the bug with a NULL-deference
  * kernel oops. Illegal ranges of incoming indices are caught too.
  */
-extern inline unsigned long fix_to_virt(const unsigned int idx)
+static inline unsigned long fix_to_virt(const unsigned int idx)
 {
 /*
  * this branch gets completely eliminated after inlining,
...
@@ -48,7 +48,7 @@
  * Talk about misusing macros..
  */
 #define __OUT1(s,x) \
-extern inline void out##s(unsigned x value, unsigned short port) {
+static inline void out##s(unsigned x value, unsigned short port) {
 #define __OUT2(s,s1,s2) \
 __asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
@@ -58,7 +58,7 @@ __OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
 __OUT1(s##_p,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));} \
 #define __IN1(s) \
-extern inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;
+static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;
 #define __IN2(s,s1,s2) \
 __asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
@@ -68,12 +68,12 @@ __IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
 __IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
 #define __INS(s) \
-extern inline void ins##s(unsigned short port, void * addr, unsigned long count) \
+static inline void ins##s(unsigned short port, void * addr, unsigned long count) \
 { __asm__ __volatile__ ("rep ; ins" #s \
 : "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
 #define __OUTS(s) \
-extern inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
+static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
 { __asm__ __volatile__ ("rep ; outs" #s \
 : "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
@@ -110,12 +110,12 @@ __OUTS(l)
 /*
  * Change virtual addresses to physical addresses and vv.
  * These are pretty trivial
  */
-extern inline unsigned long virt_to_phys(volatile void * address)
+static inline unsigned long virt_to_phys(volatile void * address)
 {
 return __pa(address);
 }
-extern inline void * phys_to_virt(unsigned long address)
+static inline void * phys_to_virt(unsigned long address)
 {
 return __va(address);
 }
@@ -130,7 +130,7 @@ extern inline void * phys_to_virt(unsigned long address)
 extern void __iomem *__ioremap(unsigned long offset, unsigned long size, unsigned long flags);
-extern inline void __iomem * ioremap (unsigned long offset, unsigned long size)
+static inline void __iomem * ioremap (unsigned long offset, unsigned long size)
 {
 return __ioremap(offset, size, 0);
 }
...
@@ -64,7 +64,7 @@
 : "=a" (low), "=d" (high) \
 : "c" (counter))
-extern inline void cpuid(int op, unsigned int *eax, unsigned int *ebx,
+static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx,
 unsigned int *ecx, unsigned int *edx)
 {
 __asm__("cpuid"
@@ -90,7 +90,7 @@ static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
 /*
  * CPUID functions returning a single datum
  */
-extern inline unsigned int cpuid_eax(unsigned int op)
+static inline unsigned int cpuid_eax(unsigned int op)
 {
 unsigned int eax;
@@ -100,7 +100,7 @@ extern inline unsigned int cpuid_eax(unsigned int op)
 : "bx", "cx", "dx");
 return eax;
 }
-extern inline unsigned int cpuid_ebx(unsigned int op)
+static inline unsigned int cpuid_ebx(unsigned int op)
 {
 unsigned int eax, ebx;
@@ -110,7 +110,7 @@ extern inline unsigned int cpuid_ebx(unsigned int op)
 : "cx", "dx" );
 return ebx;
 }
-extern inline unsigned int cpuid_ecx(unsigned int op)
+static inline unsigned int cpuid_ecx(unsigned int op)
 {
 unsigned int eax, ecx;
@@ -120,7 +120,7 @@ extern inline unsigned int cpuid_ecx(unsigned int op)
 : "bx", "dx" );
 return ecx;
 }
-extern inline unsigned int cpuid_edx(unsigned int op)
+static inline unsigned int cpuid_edx(unsigned int op)
 {
 unsigned int eax, edx;
...
@@ -18,12 +18,12 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *p
 set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
 }
-extern __inline__ pmd_t *get_pmd(void)
+static inline pmd_t *get_pmd(void)
 {
 return (pmd_t *)get_zeroed_page(GFP_KERNEL);
 }
-extern __inline__ void pmd_free(pmd_t *pmd)
+static inline void pmd_free(pmd_t *pmd)
 {
 BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
 free_page((unsigned long)pmd);
@@ -86,13 +86,13 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long add
 /* Should really implement gc for free page table pages. This could be
 done with a reference count in struct page. */
-extern __inline__ void pte_free_kernel(pte_t *pte)
+static inline void pte_free_kernel(pte_t *pte)
 {
 BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
 free_page((unsigned long)pte);
 }
-extern inline void pte_free(struct page *pte)
+static inline void pte_free(struct page *pte)
 {
 __free_page(pte);
 }
...
@@ -85,7 +85,7 @@ static inline void set_pud(pud_t *dst, pud_t val)
 pud_val(*dst) = pud_val(val);
 }
-extern inline void pud_clear (pud_t *pud)
+static inline void pud_clear (pud_t *pud)
 {
 set_pud(pud, __pud(0));
 }
@@ -95,7 +95,7 @@ static inline void set_pgd(pgd_t *dst, pgd_t val)
 pgd_val(*dst) = pgd_val(val);
 }
-extern inline void pgd_clear (pgd_t * pgd)
+static inline void pgd_clear (pgd_t * pgd)
 {
 set_pgd(pgd, __pgd(0));
 }
@@ -375,7 +375,7 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
 }
 /* Change flags of a PTE */
-extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 pte_val(pte) &= _PAGE_CHG_MASK;
 pte_val(pte) |= pgprot_val(newprot);
...
@@ -375,13 +375,13 @@ struct extended_sigtable {
 #define ASM_NOP_MAX 8
 /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
-extern inline void rep_nop(void)
+static inline void rep_nop(void)
 {
 __asm__ __volatile__("rep;nop": : :"memory");
 }
 /* Stop speculative execution */
-extern inline void sync_core(void)
+static inline void sync_core(void)
 {
 int tmp;
 asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
...
@@ -143,23 +143,23 @@ typedef struct sigaltstack {
 #undef __HAVE_ARCH_SIG_BITOPS
 #if 0
-extern __inline__ void sigaddset(sigset_t *set, int _sig)
+static inline void sigaddset(sigset_t *set, int _sig)
 {
 __asm__("btsq %1,%0" : "=m"(*set) : "Ir"(_sig - 1) : "cc");
 }
-extern __inline__ void sigdelset(sigset_t *set, int _sig)
+static inline void sigdelset(sigset_t *set, int _sig)
 {
 __asm__("btrq %1,%0" : "=m"(*set) : "Ir"(_sig - 1) : "cc");
 }
-extern __inline__ int __const_sigismember(sigset_t *set, int _sig)
+static inline int __const_sigismember(sigset_t *set, int _sig)
 {
 unsigned long sig = _sig - 1;
 return 1 & (set->sig[sig / _NSIG_BPW] >> (sig & ~(_NSIG_BPW-1)));
 }
-extern __inline__ int __gen_sigismember(sigset_t *set, int _sig)
+static inline int __gen_sigismember(sigset_t *set, int _sig)
 {
 int ret;
 __asm__("btq %2,%1\n\tsbbq %0,%0"
@@ -172,7 +172,7 @@ extern __inline__ int __gen_sigismember(sigset_t *set, int _sig)
 __const_sigismember((set),(sig)) : \
 __gen_sigismember((set),(sig)))
-extern __inline__ int sigfindinword(unsigned long word)
+static inline int sigfindinword(unsigned long word)
 {
 __asm__("bsfq %1,%0" : "=r"(word) : "rm"(word) : "cc");
 return word;
...
@@ -72,7 +72,7 @@ static inline int num_booting_cpus(void)
 #define raw_smp_processor_id() read_pda(cpunumber)
-extern __inline int hard_smp_processor_id(void)
+static inline int hard_smp_processor_id(void)
 {
 /* we don't want to mark this access volatile - bad code generation */
 return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
...
@@ -188,7 +188,7 @@ static inline void write_cr4(unsigned long val)
 #define __xg(x) ((volatile long *)(x))
-extern inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
+static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
 {
 *ptr = val;
 }
...