Commit 84d69848 authored by Linus Torvalds

Merge branch 'kbuild' of git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild

Pull kbuild updates from Michal Marek:

 - EXPORT_SYMBOL for asm source by Al Viro; a short illustration of
   the new pattern follows this list.

   This does bring a regression, because genksyms no longer generates
   checksums for these symbols (CONFIG_MODVERSIONS). Nick Piggin is
   working on a patch to fix this.

   Plus, we are talking about functions like strcpy(), which rarely
   change prototypes.

 - Fixes for PPC fallout of the above by Stephen Rothwell and Nick
   Piggin.

 - fixdep speedup by Alexey Dobriyan.

 - preparatory work by Nick Piggin to allow architectures to build with
   -ffunction-sections, -fdata-sections and --gc-sections.

 - CONFIG_THIN_ARCHIVES support by Stephen Rothwell.

 - fix for filenames with colons in the initramfs source by me.
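
As a quick illustration of the new pattern (a sketch only; "my_noop"
is a hypothetical helper, not part of this series), an assembly file
now exports a symbol right next to its definition instead of listing
it in a separate *ksyms.c file:

	#include <linux/linkage.h>
	#include <asm/assembler.h>
	#include <asm/export.h>

	ENTRY(my_noop)			@ hypothetical ARM helper
		ret	lr
	ENDPROC(my_noop)
	EXPORT_SYMBOL(my_noop)		@ ksymtab entry emitted by <asm/export.h>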

* 'kbuild' of git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild: (22 commits)
  initramfs: Escape colons in depfile
  ppc: there is no clear_pages to export
  powerpc/64: whitelist unresolved modversions CRCs
  kbuild: -ffunction-sections fix for archs with conflicting sections
  kbuild: add arch specific post-link Makefile
  kbuild: allow archs to select link dead code/data elimination
  kbuild: allow architectures to use thin archives instead of ld -r
  kbuild: Regenerate genksyms lexer
  kbuild: genksyms fix for typeof handling
  fixdep: faster CONFIG_ search
  ia64: move exports to definitions
  sparc32: debride memcpy.S a bit
  [sparc] unify 32bit and 64bit string.h
  sparc: move exports to definitions
  ppc: move exports to definitions
  arm: move exports to definitions
  s390: move exports to definitions
  m68k: move exports to definitions
  alpha: move exports to actual definitions
  x86: move exports to actual definitions
  ...
......@@ -41,6 +41,7 @@ This document describes the Linux kernel Makefiles.
--- 6.8 Custom kbuild commands
--- 6.9 Preprocessing linker scripts
--- 6.10 Generic header files
--- 6.11 Post-link pass
=== 7 Kbuild syntax for exported headers
--- 7.1 header-y
......@@ -1237,6 +1238,21 @@ When kbuild executes, the following steps are followed (roughly):
to list the file in the Kbuild file.
See "7.4 generic-y" for further info on syntax etc.
--- 6.11 Post-link pass
If the file arch/xxx/Makefile.postlink exists, this makefile
is invoked for the post-link objects (vmlinux and modules.ko),
letting architectures run post-link passes on them. It must also
handle the clean target.
This pass runs after kallsyms generation. If the architecture
needs to modify symbol locations rather than manipulate the
kallsyms data, it may be easier to add another post-link target
for the .tmp_vmlinux? files, invoked from link-vmlinux.sh.
For example, powerpc uses this to check relocation sanity of
the linked vmlinux file.
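
A minimal sketch of such a makefile (illustrative only: the CHKREL
command and the relocs_check.sh helper are hypothetical stand-ins,
loosely modeled on the powerpc usage mentioned above):

	# arch/xxx/Makefile.postlink -- sketch, not a drop-in file
	PHONY := __archpost
	__archpost:

	include scripts/Kbuild.include

	quiet_cmd_relocs_check = CHKREL  $@
	      cmd_relocs_check = $(CONFIG_SHELL) $(srctree)/arch/xxx/relocs_check.sh "$(OBJDUMP)" "$@"

	vmlinux: FORCE
		@true
		$(call cmd,relocs_check)

	# modules need a rule too, even if it is a no-op
	%.ko: FORCE
		@true

	clean:
		@true

	PHONY += FORCE clean
	FORCE:

	.PHONY: $(PHONY)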
=== 7 Kbuild syntax for exported headers
The kernel includes a set of headers that is exported to userspace.
......
......@@ -623,6 +623,11 @@ KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,)
KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
KBUILD_CFLAGS += $(call cc-option,-ffunction-sections,)
KBUILD_CFLAGS += $(call cc-option,-fdata-sections,)
endif
ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
KBUILD_CFLAGS += -Os
else
......@@ -803,6 +808,10 @@ LDFLAGS_BUILD_ID = $(patsubst -Wl$(comma)%,%,\
KBUILD_LDFLAGS_MODULE += $(LDFLAGS_BUILD_ID)
LDFLAGS_vmlinux += $(LDFLAGS_BUILD_ID)
ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
LDFLAGS_vmlinux += $(call ld-option, --gc-sections,)
endif
ifeq ($(CONFIG_STRIP_ASM_SYMS),y)
LDFLAGS_vmlinux += $(call ld-option, -X,)
endif
......@@ -942,9 +951,12 @@ endif
include/generated/autoksyms.h: FORCE
$(Q)$(CONFIG_SHELL) $(srctree)/scripts/adjust_autoksyms.sh true
# Final link of vmlinux
cmd_link-vmlinux = $(CONFIG_SHELL) $< $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux)
quiet_cmd_link-vmlinux = LINK $@
ARCH_POSTLINK := $(wildcard $(srctree)/arch/$(SRCARCH)/Makefile.postlink)
# Final link of vmlinux with optional arch pass after final link
cmd_link-vmlinux = \
$(CONFIG_SHELL) $< $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) ; \
$(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) $@, true)
vmlinux: scripts/link-vmlinux.sh vmlinux_prereq $(vmlinux-deps) FORCE
+$(call if_changed,link-vmlinux)
......@@ -1271,6 +1283,7 @@ $(clean-dirs):
vmlinuxclean:
$(Q)$(CONFIG_SHELL) $(srctree)/scripts/link-vmlinux.sh clean
$(Q)$(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) clean)
clean: archclean vmlinuxclean
......
......@@ -450,6 +450,27 @@ config CC_STACKPROTECTOR_STRONG
endchoice
config THIN_ARCHIVES
bool
help
Select this if the architecture wants to use thin archives
instead of ld -r to create the built-in.o files.
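
Concretely, CONFIG_THIN_ARCHIVES swaps the command that combines each
directory's objects into built-in.o; a sketch of the two variants
(object list illustrative):

	# traditional incremental link
	$(LD) -r -o built-in.o $(obj-y)

	# thin archive: an index-only archive that merely references
	# its members ("T" selects a thin archive)
	$(AR) rcsT built-in.o $(obj-y)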
config LD_DEAD_CODE_DATA_ELIMINATION
bool
help
Select this if the architecture wants to do dead code and
data elimination with the linker by compiling with
-ffunction-sections -fdata-sections and linking with
--gc-sections.
This requires that the arch annotates or otherwise protects
its external entry points from being discarded. Linker scripts
must also merge .text.*, .data.*, and .bss.* correctly into
output sections. Care must be taken not to pull in unrelated
sections (e.g., '.text.init'). Typically '.' in section names
is used to distinguish them from label names / C identifiers.
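
A sketch of the required merging in a linker script (illustrative;
GNU ld assigns each input section to the first matching output
section, so special sections such as '.text.init' must be captured
by their own, earlier rules):

	.text : { *(.text .text.*) }
	.data : { *(.data .data.*) }
	.bss  : { *(.bss  .bss.*)  }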
config HAVE_ARCH_WITHIN_STACK_FRAMES
bool
help
......
......@@ -3,6 +3,7 @@
generic-y += clkdev.h
generic-y += cputime.h
generic-y += exec.h
generic-y += export.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
......
......@@ -8,7 +8,7 @@ ccflags-y := -Wno-sign-compare
obj-y := entry.o traps.o process.o osf_sys.o irq.o \
irq_alpha.o signal.o setup.o ptrace.o time.o \
alpha_ksyms.o systbls.o err_common.o io.o
systbls.o err_common.o io.o
obj-$(CONFIG_VGA_HOSE) += console.o
obj-$(CONFIG_SMP) += smp.o
......
/*
* linux/arch/alpha/kernel/alpha_ksyms.c
*
* Export the alpha-specific functions that are needed for loadable
* modules.
*/
#include <linux/module.h>
#include <asm/console.h>
#include <asm/uaccess.h>
#include <asm/checksum.h>
#include <asm/fpu.h>
#include <asm/machvec.h>
#include <linux/syscalls.h>
/* these are C runtime functions with special calling conventions: */
extern void __divl (void);
extern void __reml (void);
extern void __divq (void);
extern void __remq (void);
extern void __divlu (void);
extern void __remlu (void);
extern void __divqu (void);
extern void __remqu (void);
EXPORT_SYMBOL(alpha_mv);
EXPORT_SYMBOL(callback_getenv);
EXPORT_SYMBOL(callback_setenv);
EXPORT_SYMBOL(callback_save_env);
/* platform dependent support */
EXPORT_SYMBOL(strcat);
EXPORT_SYMBOL(strcpy);
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(strncpy);
EXPORT_SYMBOL(strncat);
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strrchr);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(__memcpy);
EXPORT_SYMBOL(__memset);
EXPORT_SYMBOL(___memset);
EXPORT_SYMBOL(__memsetw);
EXPORT_SYMBOL(__constant_c_memset);
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(clear_page);
EXPORT_SYMBOL(alpha_read_fp_reg);
EXPORT_SYMBOL(alpha_read_fp_reg_s);
EXPORT_SYMBOL(alpha_write_fp_reg);
EXPORT_SYMBOL(alpha_write_fp_reg_s);
/* Networking helper routines. */
EXPORT_SYMBOL(csum_tcpudp_magic);
EXPORT_SYMBOL(ip_compute_csum);
EXPORT_SYMBOL(ip_fast_csum);
EXPORT_SYMBOL(csum_partial_copy_nocheck);
EXPORT_SYMBOL(csum_partial_copy_from_user);
EXPORT_SYMBOL(csum_ipv6_magic);
#ifdef CONFIG_MATHEMU_MODULE
extern long (*alpha_fp_emul_imprecise)(struct pt_regs *, unsigned long);
extern long (*alpha_fp_emul) (unsigned long pc);
EXPORT_SYMBOL(alpha_fp_emul_imprecise);
EXPORT_SYMBOL(alpha_fp_emul);
#endif
/*
* The following are specially called from the uaccess assembly stubs.
*/
EXPORT_SYMBOL(__copy_user);
EXPORT_SYMBOL(__do_clear_user);
/*
* SMP-specific symbols.
*/
#ifdef CONFIG_SMP
EXPORT_SYMBOL(_atomic_dec_and_lock);
#endif /* CONFIG_SMP */
/*
* The following are special because they're not called
* explicitly (the C compiler or assembler generates them in
* response to division operations). Fortunately, their
* interface isn't gonna change any time soon now, so it's OK
* to leave it out of version control.
*/
# undef memcpy
# undef memset
EXPORT_SYMBOL(__divl);
EXPORT_SYMBOL(__divlu);
EXPORT_SYMBOL(__divq);
EXPORT_SYMBOL(__divqu);
EXPORT_SYMBOL(__reml);
EXPORT_SYMBOL(__remlu);
EXPORT_SYMBOL(__remq);
EXPORT_SYMBOL(__remqu);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memchr);
......@@ -144,9 +144,11 @@
else beforehand. Fine. We'll do it ourselves. */
#if 0
#define ALIAS_MV(system) \
struct alpha_machine_vector alpha_mv __attribute__((alias(#system "_mv")));
struct alpha_machine_vector alpha_mv __attribute__((alias(#system "_mv"))); \
EXPORT_SYMBOL(alpha_mv);
#else
#define ALIAS_MV(system) \
asm(".global alpha_mv\nalpha_mv = " #system "_mv");
asm(".global alpha_mv\nalpha_mv = " #system "_mv"); \
EXPORT_SYMBOL(alpha_mv);
#endif
#endif /* GENERIC */
......@@ -115,6 +115,7 @@ unsigned long alpha_agpgart_size = DEFAULT_AGP_APER_SIZE;
#ifdef CONFIG_ALPHA_GENERIC
struct alpha_machine_vector alpha_mv;
EXPORT_SYMBOL(alpha_mv);
#endif
#ifndef alpha_using_srm
......
......@@ -3,6 +3,7 @@
*/
#include <asm/console.h>
#include <asm/export.h>
.text
#define HWRPB_CRB_OFFSET 0xc0
......@@ -92,6 +93,10 @@ CALLBACK(reset_env, CCB_RESET_ENV, 4)
CALLBACK(save_env, CCB_SAVE_ENV, 1)
CALLBACK(pswitch, CCB_PSWITCH, 3)
CALLBACK(bios_emul, CCB_BIOS_EMUL, 5)
EXPORT_SYMBOL(callback_getenv)
EXPORT_SYMBOL(callback_setenv)
EXPORT_SYMBOL(callback_save_env)
.data
__alpha_using_srm: # For use by bootpheader
......
......@@ -48,6 +48,7 @@ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
(__force u64)saddr + (__force u64)daddr +
(__force u64)sum + ((len + proto) << 8));
}
EXPORT_SYMBOL(csum_tcpudp_magic);
__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
__u32 len, __u8 proto, __wsum sum)
......@@ -144,6 +145,7 @@ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
return (__force __sum16)~do_csum(iph,ihl*4);
}
EXPORT_SYMBOL(ip_fast_csum);
/*
* computes the checksum of a memory block at buff, length len,
......@@ -178,3 +180,4 @@ __sum16 ip_compute_csum(const void *buff, int len)
{
return (__force __sum16)~from64to16(do_csum(buff,len));
}
EXPORT_SYMBOL(ip_compute_csum);
......@@ -3,7 +3,7 @@
*
* Zero an entire page.
*/
#include <asm/export.h>
.text
.align 4
.global clear_page
......@@ -37,3 +37,4 @@ clear_page:
nop
.end clear_page
EXPORT_SYMBOL(clear_page)
......@@ -24,6 +24,7 @@
* Clobbers:
* $1,$2,$3,$4,$5,$6
*/
#include <asm/export.h>
/* Allow an exception for an insn; exit if we get one. */
#define EX(x,y...) \
......@@ -111,3 +112,4 @@ $exception:
ret $31, ($28), 1 # .. e1 :
.end __do_clear_user
EXPORT_SYMBOL(__do_clear_user)
......@@ -3,7 +3,7 @@
*
* Copy an entire page.
*/
#include <asm/export.h>
.text
.align 4
.global copy_page
......@@ -47,3 +47,4 @@ copy_page:
nop
.end copy_page
EXPORT_SYMBOL(copy_page)
......@@ -26,6 +26,8 @@
* $1,$2,$3,$4,$5,$6,$7
*/
#include <asm/export.h>
/* Allow an exception for an insn; exit if we get one. */
#define EXI(x,y...) \
99: x,##y; \
......@@ -143,3 +145,4 @@ $101:
ret $31,($28),1
.end __copy_user
EXPORT_SYMBOL(__copy_user)
......@@ -12,6 +12,7 @@
* added by Ivan Kokshaysky <ink@jurassic.park.msu.ru>
*/
#include <asm/export.h>
.globl csum_ipv6_magic
.align 4
.ent csum_ipv6_magic
......@@ -113,3 +114,4 @@ csum_ipv6_magic:
ret # .. e1 :
.end csum_ipv6_magic
EXPORT_SYMBOL(csum_ipv6_magic)
......@@ -374,6 +374,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst, int len,
}
return (__force __wsum)checksum;
}
EXPORT_SYMBOL(csum_partial_copy_from_user);
__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
......@@ -386,3 +387,4 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
set_fs(oldfs);
return checksum;
}
EXPORT_SYMBOL(csum_partial_copy_nocheck);
......@@ -7,6 +7,7 @@
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/export.h>
asm (".text \n\
.global _atomic_dec_and_lock \n\
......@@ -39,3 +40,4 @@ static int __used atomic_dec_and_lock_1(atomic_t *atomic, spinlock_t *lock)
spin_unlock(lock);
return 0;
}
EXPORT_SYMBOL(_atomic_dec_and_lock);
......@@ -45,6 +45,7 @@
* $28 - compare status
*/
#include <asm/export.h>
#define halt .long 0
/*
......@@ -151,6 +152,7 @@ ufunction:
addq $30,STACK,$30
ret $31,($23),1
.end ufunction
EXPORT_SYMBOL(ufunction)
/*
* Uhh.. Ugly signed division. I'd rather not have it at all, but
......@@ -193,3 +195,4 @@ sfunction:
addq $30,STACK,$30
ret $31,($23),1
.end sfunction
EXPORT_SYMBOL(sfunction)
......@@ -3,7 +3,7 @@
*
* Zero an entire page.
*/
#include <asm/export.h>
.text
.align 4
.global clear_page
......@@ -52,3 +52,4 @@ clear_page:
nop
.end clear_page
EXPORT_SYMBOL(clear_page)
......@@ -43,6 +43,7 @@
* want to leave a hole (and we also want to avoid repeating lots of work)
*/
#include <asm/export.h>
/* Allow an exception for an insn; exit if we get one. */
#define EX(x,y...) \
99: x,##y; \
......@@ -222,4 +223,4 @@ $exception: # Destination for exception recovery(?)
nop # .. E .. .. :
ret $31, ($28), 1 # L0 .. .. .. : L U L U
.end __do_clear_user
EXPORT_SYMBOL(__do_clear_user)
......@@ -56,7 +56,7 @@
destination pages are in the dcache, but it is my guess that this is
less important than the dcache miss case. */
#include <asm/export.h>
.text
.align 4
.global copy_page
......@@ -201,3 +201,4 @@ copy_page:
nop
.end copy_page
EXPORT_SYMBOL(copy_page)
......@@ -37,6 +37,7 @@
* L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
*/
#include <asm/export.h>
/* Allow an exception for an insn; exit if we get one. */
#define EXI(x,y...) \
99: x,##y; \
......@@ -256,4 +257,4 @@ $101:
ret $31,($28),1 # L0
.end __copy_user
EXPORT_SYMBOL(__copy_user)
......@@ -52,6 +52,7 @@
* may cause additional delay in rare cases (load-load replay traps).
*/
#include <asm/export.h>
.globl csum_ipv6_magic
.align 4
.ent csum_ipv6_magic
......@@ -148,3 +149,4 @@ csum_ipv6_magic:
ret # L0 : L U L U
.end csum_ipv6_magic
EXPORT_SYMBOL(csum_ipv6_magic)
......@@ -55,6 +55,7 @@
* Try not to change the actual algorithm if possible for consistency.
*/
#include <asm/export.h>
#define halt .long 0
/*
......@@ -205,6 +206,7 @@ ufunction:
addq $30,STACK,$30 # E :
ret $31,($23),1 # L0 : L U U L
.end ufunction
EXPORT_SYMBOL(ufunction)
/*
* Uhh.. Ugly signed division. I'd rather not have it at all, but
......@@ -257,3 +259,4 @@ sfunction:
addq $30,STACK,$30 # E :
ret $31,($23),1 # L0 : L U U L
.end sfunction
EXPORT_SYMBOL(sfunction)
......@@ -27,7 +27,7 @@
* L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
* Try not to change the actual algorithm if possible for consistency.
*/
#include <asm/export.h>
.set noreorder
.set noat
......@@ -189,3 +189,4 @@ $not_found:
ret # L0 :
.end memchr
EXPORT_SYMBOL(memchr)
......@@ -19,7 +19,7 @@
* Temp usage notes:
* $1,$2, - scratch
*/
#include <asm/export.h>
.set noreorder
.set noat
......@@ -242,6 +242,7 @@ $nomoredata:
nop # E :
.end memcpy
EXPORT_SYMBOL(memcpy)
/* For backwards module compatibility. */
__memcpy = memcpy
......
......@@ -26,7 +26,7 @@
* as fixes will need to be made in multiple places. The performance gain
* is worth it.
*/
#include <asm/export.h>
.set noat
.set noreorder
.text
......@@ -229,6 +229,7 @@ end_b:
nop
ret $31,($26),1 # L0 :
.end ___memset
EXPORT_SYMBOL(___memset)
/*
* This is the original body of code, prior to replication and
......@@ -406,6 +407,7 @@ end:
nop
ret $31,($26),1 # L0 :
.end __constant_c_memset
EXPORT_SYMBOL(__constant_c_memset)
/*
* This is a replicant of the __constant_c_memset code, rescheduled
......@@ -594,6 +596,9 @@ end_w:
ret $31,($26),1 # L0 :
.end __memsetw
EXPORT_SYMBOL(__memsetw)
memset = ___memset
__memset = ___memset
EXPORT_SYMBOL(memset)
EXPORT_SYMBOL(__memset)
......@@ -19,7 +19,7 @@
* string once.
*/
#include <asm/export.h>
.text
.align 4
......@@ -52,3 +52,4 @@ $found: cttz $2, $3 # U0 :
br __stxcpy # L0 :
.end strcat
EXPORT_SYMBOL(strcat)
......@@ -15,7 +15,7 @@
* L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
* Try not to change the actual algorithm if possible for consistency.
*/
#include <asm/export.h>
#include <asm/regdef.h>
.set noreorder
......@@ -86,3 +86,4 @@ $found: negq t0, t1 # E : clear all but least set bit
ret # L0 :
.end strchr
EXPORT_SYMBOL(strchr)
......@@ -17,7 +17,7 @@
* U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1
* L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
*/
#include <asm/export.h>
.set noreorder
.set noat
......@@ -47,3 +47,4 @@ $found:
ret $31, ($26) # L0 :
.end strlen
EXPORT_SYMBOL(strlen)
......@@ -20,7 +20,7 @@
* Try not to change the actual algorithm if possible for consistency.
*/
#include <asm/export.h>
.text
.align 4
......@@ -92,3 +92,4 @@ $zerocount:
ret # L0 :
.end strncat
EXPORT_SYMBOL(strncat)
......@@ -18,7 +18,7 @@
* L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
*/
#include <asm/export.h>
#include <asm/regdef.h>
.set noreorder
......@@ -107,3 +107,4 @@ $eos:
nop
.end strrchr
EXPORT_SYMBOL(strrchr)
......@@ -4,6 +4,9 @@
* (C) Copyright 1998 Linus Torvalds
*/
#include <linux/compiler.h>
#include <linux/export.h>
#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
#define STT(reg,val) asm volatile ("ftoit $f"#reg",%0" : "=r"(val));
#else
......@@ -52,6 +55,7 @@ alpha_read_fp_reg (unsigned long reg)
}
return val;
}
EXPORT_SYMBOL(alpha_read_fp_reg);
#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
#define LDT(reg,val) asm volatile ("itoft %0,$f"#reg : : "r"(val));
......@@ -97,6 +101,7 @@ alpha_write_fp_reg (unsigned long reg, unsigned long val)
case 31: LDT(31, val); break;
}
}
EXPORT_SYMBOL(alpha_write_fp_reg);
#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
#define STS(reg,val) asm volatile ("ftois $f"#reg",%0" : "=r"(val));
......@@ -146,6 +151,7 @@ alpha_read_fp_reg_s (unsigned long reg)
}
return val;
}
EXPORT_SYMBOL(alpha_read_fp_reg_s);
#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
#define LDS(reg,val) asm volatile ("itofs %0,$f"#reg : : "r"(val));
......@@ -191,3 +197,4 @@ alpha_write_fp_reg_s (unsigned long reg, unsigned long val)
case 31: LDS(31, val); break;
}
}
EXPORT_SYMBOL(alpha_write_fp_reg_s);
......@@ -31,7 +31,7 @@ For correctness consider that:
- only minimum number of quadwords may be accessed
- the third argument is an unsigned long
*/
#include <asm/export.h>
.set noreorder
.set noat
......@@ -162,3 +162,4 @@ $not_found:
ret # .. e1 :
.end memchr
EXPORT_SYMBOL(memchr)
......@@ -16,6 +16,7 @@
*/
#include <linux/types.h>
#include <linux/export.h>
/*
* This should be done in one go with ldq_u*2/mask/stq_u. Do it
......@@ -158,6 +159,4 @@ void * memcpy(void * dest, const void *src, size_t n)
__memcpy_unaligned_up ((unsigned long) dest, (unsigned long) src, n);
return dest;
}
/* For backward modules compatibility, define __memcpy. */
asm("__memcpy = memcpy; .globl __memcpy");
EXPORT_SYMBOL(memcpy);
......@@ -6,7 +6,7 @@
* This is hand-massaged output from the original memcpy.c. We defer to
* memcpy whenever possible; the backwards copy loops are not unrolled.
*/
#include <asm/export.h>
.set noat
.set noreorder
.text
......@@ -179,3 +179,4 @@ $egress:
nop
.end memmove
EXPORT_SYMBOL(memmove)
......@@ -13,7 +13,7 @@
* The scheduling comments are according to the EV5 documentation (and done by
* hand, so they might well be incorrect, please do tell me about it..)
*/
#include <asm/export.h>
.set noat
.set noreorder
.text
......@@ -106,6 +106,8 @@ within_one_quad:
end:
ret $31,($26),1 /* E1 */
.end ___memset
EXPORT_SYMBOL(___memset)
EXPORT_SYMBOL(__constant_c_memset)
.align 5
.ent __memsetw
......@@ -122,6 +124,9 @@ __memsetw:
br __constant_c_memset /* .. E1 */
.end __memsetw
EXPORT_SYMBOL(__memsetw)
memset = ___memset
__memset = ___memset
EXPORT_SYMBOL(memset)
EXPORT_SYMBOL(__memset)
......@@ -4,6 +4,7 @@
*
* Append a null-terminated string from SRC to DST.
*/
#include <asm/export.h>
.text
......@@ -50,3 +51,4 @@ $found: negq $2, $3 # clear all but least set bit
br __stxcpy
.end strcat
EXPORT_SYMBOL(strcat);
......@@ -5,7 +5,7 @@
* Return the address of a given character within a null-terminated
* string, or null if it is not found.
*/
#include <asm/export.h>
#include <asm/regdef.h>
.set noreorder
......@@ -68,3 +68,4 @@ $retnull:
ret # .. e1 :
.end strchr
EXPORT_SYMBOL(strchr)
......@@ -5,7 +5,7 @@
* Copy a null-terminated string from SRC to DST. Return a pointer
* to the null-terminator in the source.
*/
#include <asm/export.h>
.text
.align 3
......@@ -21,3 +21,4 @@ strcpy:
br __stxcpy # do the copy
.end strcpy
EXPORT_SYMBOL(strcpy)
......@@ -11,7 +11,7 @@
* do this instead of the 9 instructions that
* binary search needs).
*/
#include <asm/export.h>
.set noreorder
.set noat
......@@ -55,3 +55,4 @@ done: subq $0, $16, $0
ret $31, ($26)
.end strlen
EXPORT_SYMBOL(strlen)
......@@ -9,7 +9,7 @@
* past count, whereas libc may write to count+1. This follows the generic
* implementation in lib/string.c and is, IMHO, more sensible.
*/
#include <asm/export.h>
.text
.align 3
......@@ -82,3 +82,4 @@ $zerocount:
ret
.end strncat
EXPORT_SYMBOL(strncat)
......@@ -10,7 +10,7 @@
* version has cropped that bit o' nastiness as well as assuming that
* __stxncpy is in range of a branch.
*/
#include <asm/export.h>
.set noat
.set noreorder
......@@ -79,3 +79,4 @@ $zerolen:
ret
.end strncpy
EXPORT_SYMBOL(strncpy)
......@@ -5,7 +5,7 @@
* Return the address of the last occurrence of a given character
* within a null-terminated string, or null if it is not found.
*/
#include <asm/export.h>
#include <asm/regdef.h>
.set noreorder
......@@ -85,3 +85,4 @@ $retnull:
ret # .. e1 :
.end strrchr
EXPORT_SYMBOL(strrchr)
......@@ -8,6 +8,7 @@ generic-y += early_ioremap.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
generic-y += export.h
generic-y += ioctl.h
generic-y += ipcbuf.h
generic-y += irq_regs.h
......
......@@ -33,7 +33,7 @@ endif
obj-$(CONFIG_CPU_IDLE) += cpuidle.o
obj-$(CONFIG_ISA_DMA_API) += dma.o
obj-$(CONFIG_FIQ) += fiq.o fiqasm.o
obj-$(CONFIG_MODULES) += armksyms.o module.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_ARM_MODULE_PLTS) += module-plts.o
obj-$(CONFIG_ISA_DMA) += dma-isa.o
obj-$(CONFIG_PCI) += bios32.o isa.o
......
/*
* linux/arch/arm/kernel/armksyms.c
*
* Copyright (C) 2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/cryptohash.h>
#include <linux/delay.h>
#include <linux/in6.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/arm-smccc.h>
#include <asm/checksum.h>
#include <asm/ftrace.h>
/*
* libgcc functions - functions that are used internally by the
* compiler... (prototypes are not correct though, but that
* doesn't really matter since they're not versioned).
*/
extern void __ashldi3(void);
extern void __ashrdi3(void);
extern void __divsi3(void);
extern void __lshrdi3(void);
extern void __modsi3(void);
extern void __muldi3(void);
extern void __ucmpdi2(void);
extern void __udivsi3(void);
extern void __umodsi3(void);
extern void __do_div64(void);
extern void __bswapsi2(void);
extern void __bswapdi2(void);
extern void __aeabi_idiv(void);
extern void __aeabi_idivmod(void);
extern void __aeabi_lasr(void);
extern void __aeabi_llsl(void);
extern void __aeabi_llsr(void);
extern void __aeabi_lmul(void);
extern void __aeabi_uidiv(void);
extern void __aeabi_uidivmod(void);
extern void __aeabi_ulcmp(void);
extern void fpundefinstr(void);
void mmioset(void *, unsigned int, size_t);
void mmiocpy(void *, const void *, size_t);
/* platform dependent support */
EXPORT_SYMBOL(arm_delay_ops);
/* networking */
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_from_user);
EXPORT_SYMBOL(csum_partial_copy_nocheck);
EXPORT_SYMBOL(__csum_ipv6_magic);
/* io */
#ifndef __raw_readsb
EXPORT_SYMBOL(__raw_readsb);
#endif
#ifndef __raw_readsw
EXPORT_SYMBOL(__raw_readsw);
#endif
#ifndef __raw_readsl
EXPORT_SYMBOL(__raw_readsl);
#endif
#ifndef __raw_writesb
EXPORT_SYMBOL(__raw_writesb);
#endif
#ifndef __raw_writesw
EXPORT_SYMBOL(__raw_writesw);
#endif
#ifndef __raw_writesl
EXPORT_SYMBOL(__raw_writesl);
#endif
/* string / mem functions */
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strrchr);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memchr);
EXPORT_SYMBOL(__memzero);
EXPORT_SYMBOL(mmioset);
EXPORT_SYMBOL(mmiocpy);
#ifdef CONFIG_MMU
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(arm_copy_from_user);
EXPORT_SYMBOL(arm_copy_to_user);
EXPORT_SYMBOL(arm_clear_user);
EXPORT_SYMBOL(__get_user_1);
EXPORT_SYMBOL(__get_user_2);
EXPORT_SYMBOL(__get_user_4);
EXPORT_SYMBOL(__get_user_8);
#ifdef __ARMEB__
EXPORT_SYMBOL(__get_user_64t_1);
EXPORT_SYMBOL(__get_user_64t_2);
EXPORT_SYMBOL(__get_user_64t_4);
EXPORT_SYMBOL(__get_user_32t_8);
#endif
EXPORT_SYMBOL(__put_user_1);
EXPORT_SYMBOL(__put_user_2);
EXPORT_SYMBOL(__put_user_4);
EXPORT_SYMBOL(__put_user_8);
#endif
/* gcc lib functions */
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__divsi3);
EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(__modsi3);
EXPORT_SYMBOL(__muldi3);
EXPORT_SYMBOL(__ucmpdi2);
EXPORT_SYMBOL(__udivsi3);
EXPORT_SYMBOL(__umodsi3);
EXPORT_SYMBOL(__do_div64);
EXPORT_SYMBOL(__bswapsi2);
EXPORT_SYMBOL(__bswapdi2);
#ifdef CONFIG_AEABI
EXPORT_SYMBOL(__aeabi_idiv);
EXPORT_SYMBOL(__aeabi_idivmod);
EXPORT_SYMBOL(__aeabi_lasr);
EXPORT_SYMBOL(__aeabi_llsl);
EXPORT_SYMBOL(__aeabi_llsr);
EXPORT_SYMBOL(__aeabi_lmul);
EXPORT_SYMBOL(__aeabi_uidiv);
EXPORT_SYMBOL(__aeabi_uidivmod);
EXPORT_SYMBOL(__aeabi_ulcmp);
#endif
/* bitops */
EXPORT_SYMBOL(_set_bit);
EXPORT_SYMBOL(_test_and_set_bit);
EXPORT_SYMBOL(_clear_bit);
EXPORT_SYMBOL(_test_and_clear_bit);
EXPORT_SYMBOL(_change_bit);
EXPORT_SYMBOL(_test_and_change_bit);
EXPORT_SYMBOL(_find_first_zero_bit_le);
EXPORT_SYMBOL(_find_next_zero_bit_le);
EXPORT_SYMBOL(_find_first_bit_le);
EXPORT_SYMBOL(_find_next_bit_le);
#ifdef __ARMEB__
EXPORT_SYMBOL(_find_first_zero_bit_be);
EXPORT_SYMBOL(_find_next_zero_bit_be);
EXPORT_SYMBOL(_find_first_bit_be);
EXPORT_SYMBOL(_find_next_bit_be);
#endif
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_OLD_MCOUNT
EXPORT_SYMBOL(mcount);
#endif
EXPORT_SYMBOL(__gnu_mcount_nc);
#endif
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
EXPORT_SYMBOL(__pv_phys_pfn_offset);
EXPORT_SYMBOL(__pv_offset);
#endif
#ifdef CONFIG_HAVE_ARM_SMCCC
EXPORT_SYMBOL(arm_smccc_smc);
EXPORT_SYMBOL(arm_smccc_hvc);
#endif
......@@ -7,6 +7,7 @@
#include <asm/assembler.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>
#include <asm/export.h>
#include "entry-header.S"
......@@ -153,6 +154,7 @@ ENTRY(mcount)
__mcount _old
#endif
ENDPROC(mcount)
EXPORT_SYMBOL(mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller_old)
......@@ -205,6 +207,7 @@ UNWIND(.fnstart)
#endif
UNWIND(.fnend)
ENDPROC(__gnu_mcount_nc)
EXPORT_SYMBOL(__gnu_mcount_nc)
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller)
......
......@@ -22,6 +22,7 @@
#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/pgtable.h>
#include <asm/export.h>
#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING)
#include CONFIG_DEBUG_LL_INCLUDE
......@@ -727,6 +728,8 @@ __pv_phys_pfn_offset:
__pv_offset:
.quad 0
.size __pv_offset, . -__pv_offset
EXPORT_SYMBOL(__pv_phys_pfn_offset)
EXPORT_SYMBOL(__pv_offset)
#endif
#include "head-common.S"
......@@ -16,6 +16,7 @@
#include <asm/opcodes-sec.h>
#include <asm/opcodes-virt.h>
#include <asm/unwind.h>
#include <asm/export.h>
/*
* Wrap c macros in asm macros to delay expansion until after the
......@@ -51,6 +52,7 @@ UNWIND( .fnend)
ENTRY(arm_smccc_smc)
SMCCC SMCCC_SMC
ENDPROC(arm_smccc_smc)
EXPORT_SYMBOL(arm_smccc_smc)
/*
* void smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2,
......@@ -60,3 +62,4 @@ ENDPROC(arm_smccc_smc)
ENTRY(arm_smccc_hvc)
SMCCC SMCCC_HVC
ENDPROC(arm_smccc_hvc)
EXPORT_SYMBOL(arm_smccc_hvc)
......@@ -28,6 +28,7 @@ Boston, MA 02110-1301, USA. */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
#ifdef __ARMEB__
#define al r1
......@@ -52,3 +53,5 @@ ENTRY(__aeabi_llsl)
ENDPROC(__ashldi3)
ENDPROC(__aeabi_llsl)
EXPORT_SYMBOL(__ashldi3)
EXPORT_SYMBOL(__aeabi_llsl)
......@@ -28,6 +28,7 @@ Boston, MA 02110-1301, USA. */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
#ifdef __ARMEB__
#define al r1
......@@ -52,3 +53,5 @@ ENTRY(__aeabi_lasr)
ENDPROC(__ashrdi3)
ENDPROC(__aeabi_lasr)
EXPORT_SYMBOL(__ashrdi3)
EXPORT_SYMBOL(__aeabi_lasr)
#include <asm/assembler.h>
#include <asm/unwind.h>
#include <asm/export.h>
#if __LINUX_ARM_ARCH__ >= 6
.macro bitop, name, instr
......@@ -25,6 +26,7 @@ UNWIND( .fnstart )
bx lr
UNWIND( .fnend )
ENDPROC(\name )
EXPORT_SYMBOL(\name )
.endm
.macro testop, name, instr, store
......@@ -55,6 +57,7 @@ UNWIND( .fnstart )
2: bx lr
UNWIND( .fnend )
ENDPROC(\name )
EXPORT_SYMBOL(\name )
.endm
#else
.macro bitop, name, instr
......@@ -74,6 +77,7 @@ UNWIND( .fnstart )
ret lr
UNWIND( .fnend )
ENDPROC(\name )
EXPORT_SYMBOL(\name )
.endm
/**
......@@ -102,5 +106,6 @@ UNWIND( .fnstart )
ret lr
UNWIND( .fnend )
ENDPROC(\name )
EXPORT_SYMBOL(\name )
.endm
#endif
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
#if __LINUX_ARM_ARCH__ >= 6
ENTRY(__bswapsi2)
......@@ -35,3 +36,5 @@ ENTRY(__bswapdi2)
ret lr
ENDPROC(__bswapdi2)
#endif
EXPORT_SYMBOL(__bswapsi2)
EXPORT_SYMBOL(__bswapdi2)
......@@ -10,6 +10,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
#include <asm/export.h>
.text
......@@ -50,6 +51,9 @@ USER( strnebt r2, [r0])
UNWIND(.fnend)
ENDPROC(arm_clear_user)
ENDPROC(__clear_user_std)
#ifndef CONFIG_UACCESS_WITH_MEMCPY
EXPORT_SYMBOL(arm_clear_user)
#endif
.pushsection .text.fixup,"ax"
.align 0
......
......@@ -13,6 +13,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
#include <asm/export.h>
/*
* Prototype:
......@@ -94,6 +95,7 @@ ENTRY(arm_copy_from_user)
#include "copy_template.S"
ENDPROC(arm_copy_from_user)
EXPORT_SYMBOL(arm_copy_from_user)
.pushsection .fixup,"ax"
.align 0
......
......@@ -13,6 +13,7 @@
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/export.h>
#define COPY_COUNT (PAGE_SZ / (2 * L1_CACHE_BYTES) PLD( -1 ))
......@@ -45,3 +46,4 @@ ENTRY(copy_page)
PLD( beq 2b )
ldmfd sp!, {r4, pc} @ 3
ENDPROC(copy_page)
EXPORT_SYMBOL(copy_page)
......@@ -13,6 +13,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
#include <asm/export.h>
/*
* Prototype:
......@@ -99,6 +100,9 @@ WEAK(arm_copy_to_user)
ENDPROC(arm_copy_to_user)
ENDPROC(__copy_to_user_std)
#ifndef CONFIG_UACCESS_WITH_MEMCPY
EXPORT_SYMBOL(arm_copy_to_user)
#endif
.pushsection .text.fixup,"ax"
.align 0
......
......@@ -9,6 +9,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
.text
......@@ -30,4 +31,4 @@ ENTRY(__csum_ipv6_magic)
adcs r0, r0, #0
ldmfd sp!, {pc}
ENDPROC(__csum_ipv6_magic)
EXPORT_SYMBOL(__csum_ipv6_magic)
......@@ -9,6 +9,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
.text
......@@ -140,3 +141,4 @@ ENTRY(csum_partial)
bne 4b
b .Lless4
ENDPROC(csum_partial)
EXPORT_SYMBOL(csum_partial)
......@@ -49,5 +49,6 @@
#define FN_ENTRY ENTRY(csum_partial_copy_nocheck)
#define FN_EXIT ENDPROC(csum_partial_copy_nocheck)
#define FN_EXPORT EXPORT_SYMBOL(csum_partial_copy_nocheck)
#include "csumpartialcopygeneric.S"
......@@ -8,6 +8,7 @@
* published by the Free Software Foundation.
*/
#include <asm/assembler.h>
#include <asm/export.h>
/*
* unsigned int
......@@ -331,3 +332,4 @@ FN_ENTRY
mov r5, r4, get_byte_1
b .Lexit
FN_EXIT
FN_EXPORT
......@@ -73,6 +73,7 @@
#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
#define FN_EXPORT EXPORT_SYMBOL(csum_partial_copy_from_user)
#include "csumpartialcopygeneric.S"
......
......@@ -24,6 +24,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/timex.h>
/*
......@@ -34,6 +35,7 @@ struct arm_delay_ops arm_delay_ops __ro_after_init = {
.const_udelay = __loop_const_udelay,
.udelay = __loop_udelay,
};
EXPORT_SYMBOL(arm_delay_ops);
static const struct delay_timer *delay_timer;
static bool delay_calibrated;
......
......@@ -15,6 +15,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
#include <asm/export.h>
#ifdef __ARMEB__
#define xh r0
......@@ -210,3 +211,4 @@ Ldiv0_64:
UNWIND(.fnend)
ENDPROC(__do_div64)
EXPORT_SYMBOL(__do_div64)
......@@ -15,6 +15,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
.text
/*
......@@ -37,6 +38,7 @@ ENTRY(_find_first_zero_bit_le)
3: mov r0, r1 @ no free bits
ret lr
ENDPROC(_find_first_zero_bit_le)
EXPORT_SYMBOL(_find_first_zero_bit_le)
/*
* Purpose : Find next 'zero' bit
......@@ -57,6 +59,7 @@ ENTRY(_find_next_zero_bit_le)
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
ENDPROC(_find_next_zero_bit_le)
EXPORT_SYMBOL(_find_next_zero_bit_le)
/*
* Purpose : Find a 'one' bit
......@@ -78,6 +81,7 @@ ENTRY(_find_first_bit_le)
3: mov r0, r1 @ no free bits
ret lr
ENDPROC(_find_first_bit_le)
EXPORT_SYMBOL(_find_first_bit_le)
/*
* Purpose : Find next 'one' bit
......@@ -97,6 +101,7 @@ ENTRY(_find_next_bit_le)
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
ENDPROC(_find_next_bit_le)
EXPORT_SYMBOL(_find_next_bit_le)
#ifdef __ARMEB__
......@@ -116,6 +121,7 @@ ENTRY(_find_first_zero_bit_be)
3: mov r0, r1 @ no free bits
ret lr
ENDPROC(_find_first_zero_bit_be)
EXPORT_SYMBOL(_find_first_zero_bit_be)
ENTRY(_find_next_zero_bit_be)
teq r1, #0
......@@ -133,6 +139,7 @@ ENTRY(_find_next_zero_bit_be)
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
ENDPROC(_find_next_zero_bit_be)
EXPORT_SYMBOL(_find_next_zero_bit_be)
ENTRY(_find_first_bit_be)
teq r1, #0
......@@ -150,6 +157,7 @@ ENTRY(_find_first_bit_be)
3: mov r0, r1 @ no free bits
ret lr
ENDPROC(_find_first_bit_be)
EXPORT_SYMBOL(_find_first_bit_be)
ENTRY(_find_next_bit_be)
teq r1, #0
......@@ -166,6 +174,7 @@ ENTRY(_find_next_bit_be)
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
ENDPROC(_find_next_bit_be)
EXPORT_SYMBOL(_find_next_bit_be)
#endif
......
......@@ -31,6 +31,7 @@
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/domain.h>
#include <asm/export.h>
ENTRY(__get_user_1)
check_uaccess r0, 1, r1, r2, __get_user_bad
......@@ -38,6 +39,7 @@ ENTRY(__get_user_1)
mov r0, #0
ret lr
ENDPROC(__get_user_1)
EXPORT_SYMBOL(__get_user_1)
ENTRY(__get_user_2)
check_uaccess r0, 2, r1, r2, __get_user_bad
......@@ -58,6 +60,7 @@ rb .req r0
mov r0, #0
ret lr
ENDPROC(__get_user_2)
EXPORT_SYMBOL(__get_user_2)
ENTRY(__get_user_4)
check_uaccess r0, 4, r1, r2, __get_user_bad
......@@ -65,6 +68,7 @@ ENTRY(__get_user_4)
mov r0, #0
ret lr
ENDPROC(__get_user_4)
EXPORT_SYMBOL(__get_user_4)
ENTRY(__get_user_8)
check_uaccess r0, 8, r1, r2, __get_user_bad
......@@ -78,6 +82,7 @@ ENTRY(__get_user_8)
mov r0, #0
ret lr
ENDPROC(__get_user_8)
EXPORT_SYMBOL(__get_user_8)
#ifdef __ARMEB__
ENTRY(__get_user_32t_8)
......@@ -91,6 +96,7 @@ ENTRY(__get_user_32t_8)
mov r0, #0
ret lr
ENDPROC(__get_user_32t_8)
EXPORT_SYMBOL(__get_user_32t_8)
ENTRY(__get_user_64t_1)
check_uaccess r0, 1, r1, r2, __get_user_bad8
......@@ -98,6 +104,7 @@ ENTRY(__get_user_64t_1)
mov r0, #0
ret lr
ENDPROC(__get_user_64t_1)
EXPORT_SYMBOL(__get_user_64t_1)
ENTRY(__get_user_64t_2)
check_uaccess r0, 2, r1, r2, __get_user_bad8
......@@ -114,6 +121,7 @@ rb .req r0
mov r0, #0
ret lr
ENDPROC(__get_user_64t_2)
EXPORT_SYMBOL(__get_user_64t_2)
ENTRY(__get_user_64t_4)
check_uaccess r0, 4, r1, r2, __get_user_bad8
......@@ -121,6 +129,7 @@ ENTRY(__get_user_64t_4)
mov r0, #0
ret lr
ENDPROC(__get_user_64t_4)
EXPORT_SYMBOL(__get_user_64t_4)
#endif
__get_user_bad8:
......
......@@ -9,6 +9,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
.Linsb_align: rsb ip, ip, #4
cmp ip, r2
......@@ -121,3 +122,4 @@ ENTRY(__raw_readsb)
ldmfd sp!, {r4 - r6, pc}
ENDPROC(__raw_readsb)
EXPORT_SYMBOL(__raw_readsb)
......@@ -9,6 +9,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
ENTRY(__raw_readsl)
teq r2, #0 @ do we have to check for the zero len?
......@@ -77,3 +78,4 @@ ENTRY(__raw_readsl)
strb r3, [r1, #0]
ret lr
ENDPROC(__raw_readsl)
EXPORT_SYMBOL(__raw_readsl)
......@@ -9,6 +9,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
.Linsw_bad_alignment:
adr r0, .Linsw_bad_align_msg
......@@ -103,4 +104,4 @@ ENTRY(__raw_readsw)
ldmfd sp!, {r4, r5, r6, pc}
EXPORT_SYMBOL(__raw_readsw)
......@@ -9,6 +9,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
.macro pack, rd, hw1, hw2
#ifndef __ARMEB__
......@@ -129,3 +130,4 @@ ENTRY(__raw_readsw)
strneb ip, [r1]
ldmfd sp!, {r4, pc}
ENDPROC(__raw_readsw)
EXPORT_SYMBOL(__raw_readsw)
......@@ -9,6 +9,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
.macro outword, rd
#ifndef __ARMEB__
......@@ -92,3 +93,4 @@ ENTRY(__raw_writesb)
ldmfd sp!, {r4, r5, pc}
ENDPROC(__raw_writesb)
EXPORT_SYMBOL(__raw_writesb)
......@@ -9,6 +9,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
ENTRY(__raw_writesl)
teq r2, #0 @ do we have to check for the zero len?
......@@ -65,3 +66,4 @@ ENTRY(__raw_writesl)
bne 6b
ret lr
ENDPROC(__raw_writesl)
EXPORT_SYMBOL(__raw_writesl)
......@@ -9,6 +9,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
.Loutsw_bad_alignment:
adr r0, .Loutsw_bad_align_msg
......@@ -124,3 +125,4 @@ ENTRY(__raw_writesw)
strne ip, [r0]
ldmfd sp!, {r4, r5, r6, pc}
EXPORT_SYMBOL(__raw_writesw)
......@@ -9,6 +9,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
.macro outword, rd
#ifndef __ARMEB__
......@@ -98,3 +99,4 @@ ENTRY(__raw_writesw)
strneh ip, [r0]
ret lr
ENDPROC(__raw_writesw)
EXPORT_SYMBOL(__raw_writesw)
......@@ -36,6 +36,7 @@ Boston, MA 02111-1307, USA. */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
#include <asm/export.h>
.macro ARM_DIV_BODY dividend, divisor, result, curbit
......@@ -238,6 +239,8 @@ UNWIND(.fnstart)
UNWIND(.fnend)
ENDPROC(__udivsi3)
ENDPROC(__aeabi_uidiv)
EXPORT_SYMBOL(__udivsi3)
EXPORT_SYMBOL(__aeabi_uidiv)
ENTRY(__umodsi3)
UNWIND(.fnstart)
......@@ -256,6 +259,7 @@ UNWIND(.fnstart)
UNWIND(.fnend)
ENDPROC(__umodsi3)
EXPORT_SYMBOL(__umodsi3)
#ifdef CONFIG_ARM_PATCH_IDIV
.align 3
......@@ -303,6 +307,8 @@ UNWIND(.fnstart)
UNWIND(.fnend)
ENDPROC(__divsi3)
ENDPROC(__aeabi_idiv)
EXPORT_SYMBOL(__divsi3)
EXPORT_SYMBOL(__aeabi_idiv)
ENTRY(__modsi3)
UNWIND(.fnstart)
......@@ -327,6 +333,7 @@ UNWIND(.fnstart)
UNWIND(.fnend)
ENDPROC(__modsi3)
EXPORT_SYMBOL(__modsi3)
#ifdef CONFIG_AEABI
......@@ -343,6 +350,7 @@ UNWIND(.save {r0, r1, ip, lr} )
UNWIND(.fnend)
ENDPROC(__aeabi_uidivmod)
EXPORT_SYMBOL(__aeabi_uidivmod)
ENTRY(__aeabi_idivmod)
UNWIND(.fnstart)
......@@ -356,6 +364,7 @@ UNWIND(.save {r0, r1, ip, lr} )
UNWIND(.fnend)
ENDPROC(__aeabi_idivmod)
EXPORT_SYMBOL(__aeabi_idivmod)
#endif
......
......@@ -28,6 +28,7 @@ Boston, MA 02110-1301, USA. */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
#ifdef __ARMEB__
#define al r1
......@@ -52,3 +53,5 @@ ENTRY(__aeabi_llsr)
ENDPROC(__lshrdi3)
ENDPROC(__aeabi_llsr)
EXPORT_SYMBOL(__lshrdi3)
EXPORT_SYMBOL(__aeabi_llsr)
......@@ -11,6 +11,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
.text
.align 5
......@@ -24,3 +25,4 @@ ENTRY(memchr)
2: movne r0, #0
ret lr
ENDPROC(memchr)
EXPORT_SYMBOL(memchr)
......@@ -13,6 +13,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
#include <asm/export.h>
#define LDR1W_SHIFT 0
#define STR1W_SHIFT 0
......@@ -68,3 +69,5 @@ ENTRY(memcpy)
ENDPROC(memcpy)
ENDPROC(mmiocpy)
EXPORT_SYMBOL(memcpy)
EXPORT_SYMBOL(mmiocpy)
......@@ -13,6 +13,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
#include <asm/export.h>
.text
......@@ -225,3 +226,4 @@ ENTRY(memmove)
18: backward_copy_shift push=24 pull=8
ENDPROC(memmove)
EXPORT_SYMBOL(memmove)
......@@ -12,6 +12,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
#include <asm/export.h>
.text
.align 5
......@@ -135,3 +136,5 @@ UNWIND( .fnstart )
UNWIND( .fnend )
ENDPROC(memset)
ENDPROC(mmioset)
EXPORT_SYMBOL(memset)
EXPORT_SYMBOL(mmioset)
......@@ -10,6 +10,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
#include <asm/export.h>
.text
.align 5
......@@ -135,3 +136,4 @@ UNWIND( .fnstart )
ret lr @ 1
UNWIND( .fnend )
ENDPROC(__memzero)
EXPORT_SYMBOL(__memzero)
......@@ -12,6 +12,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
#ifdef __ARMEB__
#define xh r0
......@@ -46,3 +47,5 @@ ENTRY(__aeabi_lmul)
ENDPROC(__muldi3)
ENDPROC(__aeabi_lmul)
EXPORT_SYMBOL(__muldi3)
EXPORT_SYMBOL(__aeabi_lmul)
......@@ -31,6 +31,7 @@
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/domain.h>
#include <asm/export.h>
ENTRY(__put_user_1)
check_uaccess r0, 1, r1, ip, __put_user_bad
......@@ -38,6 +39,7 @@ ENTRY(__put_user_1)
mov r0, #0
ret lr
ENDPROC(__put_user_1)
EXPORT_SYMBOL(__put_user_1)
ENTRY(__put_user_2)
check_uaccess r0, 2, r1, ip, __put_user_bad
......@@ -62,6 +64,7 @@ ENTRY(__put_user_2)
mov r0, #0
ret lr
ENDPROC(__put_user_2)
EXPORT_SYMBOL(__put_user_2)
ENTRY(__put_user_4)
check_uaccess r0, 4, r1, ip, __put_user_bad
......@@ -69,6 +72,7 @@ ENTRY(__put_user_4)
mov r0, #0
ret lr
ENDPROC(__put_user_4)
EXPORT_SYMBOL(__put_user_4)
ENTRY(__put_user_8)
check_uaccess r0, 8, r1, ip, __put_user_bad
......@@ -82,6 +86,7 @@ ENTRY(__put_user_8)
mov r0, #0
ret lr
ENDPROC(__put_user_8)
EXPORT_SYMBOL(__put_user_8)
__put_user_bad:
mov r0, #-EFAULT
......
......@@ -11,6 +11,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
.text
.align 5
......@@ -25,3 +26,4 @@ ENTRY(strchr)
subeq r0, r0, #1
ret lr
ENDPROC(strchr)
EXPORT_SYMBOL(strchr)
......@@ -11,6 +11,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
.text
.align 5
......@@ -24,3 +25,4 @@ ENTRY(strrchr)
mov r0, r3
ret lr
ENDPROC(strrchr)
EXPORT_SYMBOL(strrchr)
......@@ -19,6 +19,7 @@
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <asm/current.h>
#include <asm/page.h>
......@@ -156,6 +157,7 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n)
}
return n;
}
EXPORT_SYMBOL(arm_copy_to_user);
static unsigned long noinline
__clear_user_memset(void __user *addr, unsigned long n)
......@@ -213,6 +215,7 @@ unsigned long arm_clear_user(void __user *addr, unsigned long n)
}
return n;
}
EXPORT_SYMBOL(arm_clear_user);
#if 0
......
......@@ -12,6 +12,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
#ifdef __ARMEB__
#define xh r0
......@@ -35,6 +36,7 @@ ENTRY(__ucmpdi2)
ret lr
ENDPROC(__ucmpdi2)
EXPORT_SYMBOL(__ucmpdi2)
#ifdef CONFIG_AEABI
......@@ -48,6 +50,7 @@ ENTRY(__aeabi_ulcmp)
ret lr
ENDPROC(__aeabi_ulcmp)
EXPORT_SYMBOL(__aeabi_ulcmp)
#endif
......@@ -32,7 +32,6 @@ endif
ifdef CONFIG_SND_IMX_SOC
obj-y += ssi-fiq.o
obj-y += ssi-fiq-ksym.o
endif
# i.MX21 based machines
......
/*
* Exported ksyms for the SSI FIQ handler
*
* Copyright (C) 2009, Sascha Hauer <s.hauer@pengutronix.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/platform_data/asoc-imx-ssi.h>
EXPORT_SYMBOL(imx_ssi_fiq_tx_buffer);
EXPORT_SYMBOL(imx_ssi_fiq_rx_buffer);
EXPORT_SYMBOL(imx_ssi_fiq_start);
EXPORT_SYMBOL(imx_ssi_fiq_end);
EXPORT_SYMBOL(imx_ssi_fiq_base);
......@@ -8,6 +8,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>
/*
* r8 = bit 0-15: tx offset, bit 16-31: tx buffer size
......@@ -144,4 +145,8 @@ imx_ssi_fiq_tx_buffer:
.word 0x0
.L_imx_ssi_fiq_end:
imx_ssi_fiq_end:
EXPORT_SYMBOL(imx_ssi_fiq_tx_buffer)
EXPORT_SYMBOL(imx_ssi_fiq_rx_buffer)
EXPORT_SYMBOL(imx_ssi_fiq_start)
EXPORT_SYMBOL(imx_ssi_fiq_end)
EXPORT_SYMBOL(imx_ssi_fiq_base)
......@@ -33,5 +33,5 @@ $(obj)/vmlinux.bin: vmlinux FORCE
LDFLAGS_bootloader = -static -T
$(obj)/bootloader: $(src)/bootloader.lds $(obj)/bootloader.o $(obj)/boot_head.o $(obj)/fw-emu.o \
lib/lib.a arch/ia64/lib/built-in.o arch/ia64/lib/lib.a FORCE
lib/lib.a arch/ia64/lib/lib.a FORCE
$(call if_changed,ld)
/* EXPORT_DATA_SYMBOL != EXPORT_SYMBOL here */
#define KSYM_FUNC(name) @fptr(name)
#include <asm-generic/export.h>
......@@ -48,6 +48,7 @@
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/export.h>
#include "minstate.h"
......@@ -1345,12 +1346,14 @@ GLOBAL_ENTRY(unw_init_running)
mov rp=loc0
br.ret.sptk.many rp
END(unw_init_running)
EXPORT_SYMBOL(unw_init_running)
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
GLOBAL_ENTRY(_mcount)
br ftrace_stub
END(_mcount)
EXPORT_SYMBOL(_mcount)
.here:
br.ret.sptk.many b0
......
......@@ -35,6 +35,7 @@
#include <asm/processor.h>
#include <asm/asmmacro.h>
#include <asm/export.h>
/*
* Inputs:
......@@ -94,3 +95,4 @@ GLOBAL_ENTRY(esi_call_phys)
mov gp=loc2
br.ret.sptk.many rp
END(esi_call_phys)
EXPORT_SYMBOL_GPL(esi_call_phys)
......@@ -32,6 +32,7 @@
#include <asm/mca_asm.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/export.h>
#ifdef CONFIG_HOTPLUG_CPU
#define SAL_PSR_BITS_TO_SET \
......@@ -168,6 +169,7 @@ RestRR: \
__PAGE_ALIGNED_DATA
.global empty_zero_page
EXPORT_DATA_SYMBOL_GPL(empty_zero_page)
empty_zero_page:
.skip PAGE_SIZE
......
/*
* Architecture-specific kernel symbols
*
* Don't put any exports here unless it's defined in an assembler file.
* All other exports should be put directly after the definition.
*/
#include <linux/module.h>
#include <linux/string.h>
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(strlen);
#include <asm/pgtable.h>
EXPORT_SYMBOL_GPL(empty_zero_page);
#include <asm/checksum.h>
EXPORT_SYMBOL(ip_fast_csum); /* hand-coded assembly */
EXPORT_SYMBOL(csum_ipv6_magic);
#include <asm/page.h>
EXPORT_SYMBOL(clear_page);
EXPORT_SYMBOL(copy_page);
#ifdef CONFIG_VIRTUAL_MEM_MAP
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/bootmem.h>
EXPORT_SYMBOL(min_low_pfn); /* defined by bootmem.c, but not exported by generic code */
EXPORT_SYMBOL(max_low_pfn); /* defined by bootmem.c, but not exported by generic code */
#endif
#include <asm/processor.h>
EXPORT_SYMBOL(ia64_cpu_info);
#ifdef CONFIG_SMP
EXPORT_SYMBOL(local_per_cpu_offset);
#endif
#include <asm/uaccess.h>
EXPORT_SYMBOL(__copy_user);
EXPORT_SYMBOL(__do_clear_user);
EXPORT_SYMBOL(__strlen_user);
EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(__strnlen_user);
/* from arch/ia64/lib */
extern void __divsi3(void);
extern void __udivsi3(void);
extern void __modsi3(void);
extern void __umodsi3(void);
extern void __divdi3(void);
extern void __udivdi3(void);
extern void __moddi3(void);
extern void __umoddi3(void);
EXPORT_SYMBOL(__divsi3);
EXPORT_SYMBOL(__udivsi3);
EXPORT_SYMBOL(__modsi3);
EXPORT_SYMBOL(__umodsi3);
EXPORT_SYMBOL(__divdi3);
EXPORT_SYMBOL(__udivdi3);
EXPORT_SYMBOL(__moddi3);
EXPORT_SYMBOL(__umoddi3);
#if defined(CONFIG_MD_RAID456) || defined(CONFIG_MD_RAID456_MODULE)
extern void xor_ia64_2(void);
extern void xor_ia64_3(void);
extern void xor_ia64_4(void);
extern void xor_ia64_5(void);
EXPORT_SYMBOL(xor_ia64_2);
EXPORT_SYMBOL(xor_ia64_3);
EXPORT_SYMBOL(xor_ia64_4);
EXPORT_SYMBOL(xor_ia64_5);
#endif
#include <asm/pal.h>
EXPORT_SYMBOL(ia64_pal_call_phys_stacked);
EXPORT_SYMBOL(ia64_pal_call_phys_static);
EXPORT_SYMBOL(ia64_pal_call_stacked);
EXPORT_SYMBOL(ia64_pal_call_static);
EXPORT_SYMBOL(ia64_load_scratch_fpregs);
EXPORT_SYMBOL(ia64_save_scratch_fpregs);
#include <asm/unwind.h>
EXPORT_SYMBOL(unw_init_running);
#if defined(CONFIG_IA64_ESI) || defined(CONFIG_IA64_ESI_MODULE)
extern void esi_call_phys (void);
EXPORT_SYMBOL_GPL(esi_call_phys);
#endif
extern char ia64_ivt[];
EXPORT_SYMBOL(ia64_ivt);
#include <asm/ftrace.h>
#ifdef CONFIG_FUNCTION_TRACER
/* mcount is defined in assembly */
EXPORT_SYMBOL(_mcount);
#endif
#include <asm/cacheflush.h>
EXPORT_SYMBOL_GPL(flush_icache_range);
......@@ -57,6 +57,7 @@
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/export.h>
#if 0
# define PSR_DEFAULT_BITS psr.ac
......@@ -85,6 +86,7 @@
.align 32768 // align on 32KB boundary
.global ia64_ivt
EXPORT_DATA_SYMBOL(ia64_ivt)
ia64_ivt:
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
......
......@@ -14,6 +14,7 @@
#include <asm/asmmacro.h>
#include <asm/processor.h>
#include <asm/export.h>
.data
pal_entry_point:
......@@ -87,6 +88,7 @@ GLOBAL_ENTRY(ia64_pal_call_static)
srlz.d // seralize restoration of psr.l
br.ret.sptk.many b0
END(ia64_pal_call_static)
EXPORT_SYMBOL(ia64_pal_call_static)
/*
* Make a PAL call using the stacked registers calling convention.
......@@ -122,6 +124,7 @@ GLOBAL_ENTRY(ia64_pal_call_stacked)
srlz.d // serialize restoration of psr.l
br.ret.sptk.many b0
END(ia64_pal_call_stacked)
EXPORT_SYMBOL(ia64_pal_call_stacked)
/*
* Make a physical mode PAL call using the static registers calling convention.
......@@ -193,6 +196,7 @@ GLOBAL_ENTRY(ia64_pal_call_phys_static)
srlz.d // seralize restoration of psr.l
br.ret.sptk.many b0
END(ia64_pal_call_phys_static)
EXPORT_SYMBOL(ia64_pal_call_phys_static)
/*
* Make a PAL call using the stacked registers in physical mode.
......@@ -250,6 +254,7 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
srlz.d // seralize restoration of psr.l
br.ret.sptk.many b0
END(ia64_pal_call_phys_stacked)
EXPORT_SYMBOL(ia64_pal_call_phys_stacked)
/*
* Save scratch fp scratch regs which aren't saved in pt_regs already
......@@ -275,6 +280,7 @@ GLOBAL_ENTRY(ia64_save_scratch_fpregs)
stf.spill [r2] = f15,32
br.ret.sptk.many rp
END(ia64_save_scratch_fpregs)
EXPORT_SYMBOL(ia64_save_scratch_fpregs)
/*
* Load scratch fp scratch regs (fp10-fp15)
......@@ -296,3 +302,4 @@ GLOBAL_ENTRY(ia64_load_scratch_fpregs)
ldf.fill f15 = [r2],32
br.ret.sptk.many rp
END(ia64_load_scratch_fpregs)
EXPORT_SYMBOL(ia64_load_scratch_fpregs)
......@@ -71,7 +71,11 @@ EXPORT_SYMBOL(__per_cpu_offset);
#endif
DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
EXPORT_SYMBOL(ia64_cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
#ifdef CONFIG_SMP
EXPORT_SYMBOL(local_per_cpu_offset);
#endif
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;
......
......@@ -2,17 +2,15 @@
# Makefile for ia64-specific library routines..
#
obj-y := io.o
lib-y := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \
lib-y := io.o __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \
__divdi3.o __udivdi3.o __moddi3.o __umoddi3.o \
checksum.o clear_page.o csum_partial_copy.o \
clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o \
flush.o ip_fast_csum.o do_csum.o \
memset.o strlen.o xor.o
obj-$(CONFIG_ITANIUM) += copy_page.o copy_user.o memcpy.o
obj-$(CONFIG_MCKINLEY) += copy_page_mck.o memcpy_mck.o
lib-$(CONFIG_ITANIUM) += copy_page.o copy_user.o memcpy.o
lib-$(CONFIG_MCKINLEY) += copy_page_mck.o memcpy_mck.o
lib-$(CONFIG_PERFMON) += carta_random.o
AFLAGS___divdi3.o =
......
......@@ -11,6 +11,7 @@
#include <asm/asmmacro.h>
#include <asm/page.h>
#include <asm/export.h>
#ifdef CONFIG_ITANIUM
# define L3_LINE_SIZE 64 // Itanium L3 line size
......@@ -74,3 +75,4 @@ GLOBAL_ENTRY(clear_page)
mov ar.lc = saved_lc // restore lc
br.ret.sptk.many rp
END(clear_page)
EXPORT_SYMBOL(clear_page)
......@@ -12,6 +12,7 @@
*/
#include <asm/asmmacro.h>
#include <asm/export.h>
//
// arguments
......@@ -207,3 +208,4 @@ GLOBAL_ENTRY(__do_clear_user)
mov ar.lc=saved_lc
br.ret.sptk.many rp
END(__do_clear_user)
EXPORT_SYMBOL(__do_clear_user)
......@@ -16,6 +16,7 @@
*/
#include <asm/asmmacro.h>
#include <asm/page.h>
#include <asm/export.h>
#define PIPE_DEPTH 3
#define EPI p[PIPE_DEPTH-1]
......@@ -96,3 +97,4 @@ GLOBAL_ENTRY(copy_page)
mov ar.lc=saved_lc
br.ret.sptk.many rp
END(copy_page)
EXPORT_SYMBOL(copy_page)
......@@ -61,6 +61,7 @@
*/
#include <asm/asmmacro.h>
#include <asm/page.h>
#include <asm/export.h>
#define PREFETCH_DIST 8 // McKinley sustains 16 outstanding L2 misses (8 ld, 8 st)
......@@ -183,3 +184,4 @@ GLOBAL_ENTRY(copy_page)
mov pr = saved_pr, -1
br.ret.sptk.many rp
END(copy_page)
EXPORT_SYMBOL(copy_page)
......@@ -30,6 +30,7 @@
*/
#include <asm/asmmacro.h>
#include <asm/export.h>
//
// Tuneable parameters
......@@ -608,3 +609,4 @@ GLOBAL_ENTRY(__copy_user)
mov ar.pfs=saved_pfs
br.ret.sptk.many rp
END(__copy_user)
EXPORT_SYMBOL(__copy_user)
......@@ -8,6 +8,7 @@
*/
#include <asm/asmmacro.h>
#include <asm/export.h>
/*
......@@ -60,6 +61,7 @@ GLOBAL_ENTRY(flush_icache_range)
mov ar.lc=r3 // restore ar.lc
br.ret.sptk.many rp
END(flush_icache_range)
EXPORT_SYMBOL_GPL(flush_icache_range)
/*
* clflush_cache_range(start,size)
......
......@@ -15,6 +15,7 @@
*/
#include <asm/asmmacro.h>
#include <asm/export.h>
#ifdef MODULO
# define OP mod
......@@ -81,3 +82,4 @@ GLOBAL_ENTRY(NAME)
getf.sig r8 = f6 // transfer result to result register
br.ret.sptk.many rp
END(NAME)
EXPORT_SYMBOL(NAME)
......@@ -15,6 +15,7 @@
*/
#include <asm/asmmacro.h>
#include <asm/export.h>
#ifdef MODULO
# define OP mod
......@@ -78,3 +79,4 @@ GLOBAL_ENTRY(NAME)
getf.sig r8 = f11 // transfer result to result register
br.ret.sptk.many rp
END(NAME)
EXPORT_SYMBOL(NAME)
......@@ -13,6 +13,7 @@
*/
#include <asm/asmmacro.h>
#include <asm/export.h>
/*
* Since we know that most likely this function is called with buf aligned
......@@ -92,6 +93,7 @@ GLOBAL_ENTRY(ip_fast_csum)
mov b0=r34
br.ret.sptk.many b0
END(ip_fast_csum)
EXPORT_SYMBOL(ip_fast_csum)
GLOBAL_ENTRY(csum_ipv6_magic)
ld4 r20=[in0],4
......@@ -142,3 +144,4 @@ GLOBAL_ENTRY(csum_ipv6_magic)
andcm r8=r9,r8
br.ret.sptk.many b0
END(csum_ipv6_magic)
EXPORT_SYMBOL(csum_ipv6_magic)
......@@ -14,6 +14,7 @@
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <asm/asmmacro.h>
#include <asm/export.h>
GLOBAL_ENTRY(memcpy)
......@@ -299,3 +300,4 @@ GLOBAL_ENTRY(memcpy)
COPY(56, 0)
END(memcpy)
EXPORT_SYMBOL(memcpy)
......@@ -15,6 +15,7 @@
*/
#include <asm/asmmacro.h>
#include <asm/page.h>
#include <asm/export.h>
#define EK(y...) EX(y)
......@@ -78,6 +79,7 @@ GLOBAL_ENTRY(memcpy)
br.cond.sptk .common_code
;;
END(memcpy)
EXPORT_SYMBOL(memcpy)
GLOBAL_ENTRY(__copy_user)
.prologue
// check dest alignment
......@@ -664,3 +666,4 @@ EK(.ex_handler, (p17) st8 [dst1]=r39,8); \
/* end of McKinley specific optimization */
END(__copy_user)
EXPORT_SYMBOL(__copy_user)
......@@ -18,6 +18,7 @@
to get peak speed when value = 0. */
#include <asm/asmmacro.h>
#include <asm/export.h>
#undef ret
#define dest in0
......@@ -360,3 +361,4 @@ GLOBAL_ENTRY(memset)
br.ret.sptk.many rp
}
END(memset)
EXPORT_SYMBOL(memset)
......@@ -17,6 +17,7 @@
*/
#include <asm/asmmacro.h>
#include <asm/export.h>
//
//
......@@ -190,3 +191,4 @@ GLOBAL_ENTRY(strlen)
mov ar.pfs=saved_pfs // because of ar.ec, restore no matter what
br.ret.sptk.many rp // end of successful recovery code
END(strlen)
EXPORT_SYMBOL(strlen)
......@@ -16,6 +16,7 @@
*/
#include <asm/asmmacro.h>
#include <asm/export.h>
//
// int strlen_user(char *)
......@@ -196,3 +197,4 @@ GLOBAL_ENTRY(__strlen_user)
mov ar.pfs=saved_pfs // because of ar.ec, restore no matter what
br.ret.sptk.many rp
END(__strlen_user)
EXPORT_SYMBOL(__strlen_user)
......@@ -17,6 +17,7 @@
*/
#include <asm/asmmacro.h>
#include <asm/export.h>
GLOBAL_ENTRY(__strncpy_from_user)
alloc r2=ar.pfs,3,0,0,0
......@@ -42,3 +43,4 @@ GLOBAL_ENTRY(__strncpy_from_user)
[.Lexit:]
br.ret.sptk.many rp
END(__strncpy_from_user)
EXPORT_SYMBOL(__strncpy_from_user)
......@@ -13,6 +13,7 @@
*/
#include <asm/asmmacro.h>
#include <asm/export.h>
GLOBAL_ENTRY(__strnlen_user)
.prologue
......@@ -43,3 +44,4 @@ GLOBAL_ENTRY(__strnlen_user)
mov ar.lc=r16 // restore ar.lc
br.ret.sptk.many rp
END(__strnlen_user)
EXPORT_SYMBOL(__strnlen_user)
......@@ -14,6 +14,7 @@
*/
#include <asm/asmmacro.h>
#include <asm/export.h>
GLOBAL_ENTRY(xor_ia64_2)
.prologue
......@@ -51,6 +52,7 @@ GLOBAL_ENTRY(xor_ia64_2)
mov pr = r29, -1
br.ret.sptk.few rp
END(xor_ia64_2)
EXPORT_SYMBOL(xor_ia64_2)
GLOBAL_ENTRY(xor_ia64_3)
.prologue
......@@ -91,6 +93,7 @@ GLOBAL_ENTRY(xor_ia64_3)
mov pr = r29, -1
br.ret.sptk.few rp
END(xor_ia64_3)
EXPORT_SYMBOL(xor_ia64_3)
GLOBAL_ENTRY(xor_ia64_4)
.prologue
......@@ -134,6 +137,7 @@ GLOBAL_ENTRY(xor_ia64_4)
mov pr = r29, -1
br.ret.sptk.few rp
END(xor_ia64_4)
EXPORT_SYMBOL(xor_ia64_4)
GLOBAL_ENTRY(xor_ia64_5)
.prologue
......@@ -182,3 +186,4 @@ GLOBAL_ENTRY(xor_ia64_5)
mov pr = r29, -1
br.ret.sptk.few rp
END(xor_ia64_5)
EXPORT_SYMBOL(xor_ia64_5)
#define KSYM_ALIGN 2
#define KCRC_ALIGN 2
#include <asm-generic/export.h>
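These three lines are all the per-architecture glue the assembler exports need: the header overrides the alignment defaults and defers to asm-generic/export.h, whose assembler macros emit the same export-table records the C macro does. A simplified sketch of what each record amounts to in this era, assuming the pre-relative-reference layout:

	/* Hedged sketch; the real definition lives in include/linux/export.h. */
	struct kernel_symbol {
		unsigned long value;	/* address of the exported function or object */
		const char *name;	/* name string placed in __ksymtab_strings */
	};

One such record lands in a __ksymtab section per EXPORT_SYMBOL; the module loader walks these tables to resolve imports at load time.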
......@@ -13,7 +13,7 @@ extra-$(CONFIG_SUN3X) := head.o
extra-$(CONFIG_SUN3) := sun3-head.o
extra-y += vmlinux.lds
obj-y := entry.o irq.o m68k_ksyms.o module.o process.o ptrace.o
obj-y := entry.o irq.o module.o process.o ptrace.o
obj-y += setup.o signal.o sys_m68k.o syscalltable.o time.o traps.o
obj-$(CONFIG_MMU_MOTOROLA) += ints.o vectors.o
......
#include <linux/module.h>
asmlinkage long long __ashldi3 (long long, int);
asmlinkage long long __ashrdi3 (long long, int);
asmlinkage long long __lshrdi3 (long long, int);
asmlinkage long long __muldi3 (long long, long long);
/* The following are special because they're not called
explicitly (the C compiler generates them). Fortunately,
their interface isn't gonna change any time soon now, so
it's OK to leave it out of version control. */
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(__muldi3);
#if defined(CONFIG_CPU_HAS_NO_MULDIV64)
/*
* Simpler 68k and ColdFire parts also need a few other gcc functions.
*/
extern long long __divsi3(long long, long long);
extern long long __modsi3(long long, long long);
extern long long __mulsi3(long long, long long);
extern long long __udivsi3(long long, long long);
extern long long __umodsi3(long long, long long);
EXPORT_SYMBOL(__divsi3);
EXPORT_SYMBOL(__modsi3);
EXPORT_SYMBOL(__mulsi3);
EXPORT_SYMBOL(__udivsi3);
EXPORT_SYMBOL(__umodsi3);
#endif
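The deleted m68k_ksyms.c existed only to export libgcc-style helpers that the compiler calls behind the programmer's back. Note how its hand-written prototypes had drifted: the SI-mode helpers (__divsi3 and friends) really take 32-bit operands, but since only the address feeds the export table the mismatch went unnoticed; attaching each export to its definition, as the __ashldi3 hunk below does, removes that class of skew. A hedged demo of why the helpers must be exported at all (function name hypothetical):

	/* On 32-bit m68k, plain C in a module can lower to a libgcc call: */
	long long shift_left(long long v, int n)
	{
		return v << n;		/* gcc may emit a call to __ashldi3 here */
	}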
......@@ -13,6 +13,9 @@ but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details. */
#include <linux/compiler.h>
#include <linux/export.h>
#define BITS_PER_UNIT 8
typedef int SItype __attribute__ ((mode (SI)));
......@@ -55,3 +58,4 @@ __ashldi3 (DItype u, word_type b)
return w.ll;
}
EXPORT_SYMBOL(__ashldi3);
......@@ -13,6 +13,9 @@ but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details. */
#include <linux/compiler.h>
#include <linux/export.h>
#define BITS_PER_UNIT 8
typedef int SItype __attribute__ ((mode (SI)));
......@@ -56,3 +59,4 @@ __ashrdi3 (DItype u, word_type b)
return w.ll;
}
EXPORT_SYMBOL(__ashrdi3);
......@@ -33,6 +33,8 @@ General Public License for more details. */
D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992
*/
#include <asm/export.h>
/* These are predefined by new versions of GNU cpp. */
#ifndef __USER_LABEL_PREFIX__
......@@ -118,3 +120,4 @@ L2: movel d1, sp@-
L3: movel sp@+, d2
rts
EXPORT_SYMBOL(__divsi3)
......@@ -13,6 +13,9 @@ but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details. */
#include <linux/compiler.h>
#include <linux/export.h>
#define BITS_PER_UNIT 8
typedef int SItype __attribute__ ((mode (SI)));
......@@ -55,3 +58,4 @@ __lshrdi3 (DItype u, word_type b)
return w.ll;
}
EXPORT_SYMBOL(__lshrdi3);
......@@ -33,6 +33,8 @@ General Public License for more details. */
D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992
*/
#include <asm/export.h>
/* These are predefined by new versions of GNU cpp. */
#ifndef __USER_LABEL_PREFIX__
......@@ -106,3 +108,4 @@ SYM (__modsi3):
movel d1, d0
rts
EXPORT_SYMBOL(__modsi3)
......@@ -14,6 +14,9 @@ but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details. */
#include <linux/compiler.h>
#include <linux/export.h>
#ifdef CONFIG_CPU_HAS_NO_MULDIV64
#define SI_TYPE_SIZE 32
......@@ -90,3 +93,4 @@ __muldi3 (DItype u, DItype v)
return w.ll;
}
EXPORT_SYMBOL(__muldi3);
......@@ -32,7 +32,7 @@ General Public License for more details. */
Some of this code comes from MINIX, via the folks at ericsson.
D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992
*/
#include <asm/export.h>
/* These are predefined by new versions of GNU cpp. */
#ifndef __USER_LABEL_PREFIX__
......@@ -102,4 +102,4 @@ SYM (__mulsi3):
addl d1, d0
rts
EXPORT_SYMBOL(__mulsi3)
......@@ -32,7 +32,7 @@ General Public License for more details. */
Some of this code comes from MINIX, via the folks at ericsson.
D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992
*/
#include <asm/export.h>
/* These are predefined by new versions of GNU cpp. */
#ifndef __USER_LABEL_PREFIX__
......@@ -154,4 +154,4 @@ L2: subql IMM (1),d4
unlk a6 | and return
rts
#endif /* __mcf5200__ || __mcoldfire__ */
EXPORT_SYMBOL(__udivsi3)
......@@ -32,7 +32,7 @@ General Public License for more details. */
Some of this code comes from MINIX, via the folks at ericsson.
D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992
*/
#include <asm/export.h>
/* These are predefined by new versions of GNU cpp. */
#ifndef __USER_LABEL_PREFIX__
......@@ -105,4 +105,4 @@ SYM (__umodsi3):
subl d0, d1 /* d1 = a - (a/b)*b */
movel d1, d0
rts
EXPORT_SYMBOL(__umodsi3)
generic-y += clkdev.h
generic-y += div64.h
generic-y += export.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += local64.h
......
......@@ -90,10 +90,6 @@ obj-$(CONFIG_RELOCATABLE) += reloc_$(BITS).o
obj-$(CONFIG_PPC32) += entry_32.o setup_32.o
obj-$(CONFIG_PPC64) += dma-iommu.o iommu.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_MODULES) += ppc_ksyms.o
ifeq ($(CONFIG_PPC32),y)
obj-$(CONFIG_MODULES) += ppc_ksyms_32.o
endif
obj-$(CONFIG_BOOTX_TEXT) += btext.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_KPROBES) += kprobes.o
......
......@@ -33,6 +33,7 @@
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/ptrace.h>
#include <asm/export.h>
/*
* MSR_KERNEL is > 0x10000 on 4xx/Book-E since it include MSR_CE.
......@@ -1358,6 +1359,7 @@ _GLOBAL(_mcount)
MCOUNT_RESTORE_FRAME
bctr
#endif
EXPORT_SYMBOL(_mcount)
_GLOBAL(ftrace_stub)
blr
......
......@@ -38,6 +38,7 @@
#include <asm/context_tracking.h>
#include <asm/tm.h>
#include <asm/ppc-opcode.h>
#include <asm/export.h>
/*
* System calls.
......@@ -1177,6 +1178,7 @@ _GLOBAL(enter_prom)
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
EXPORT_SYMBOL(_mcount)
mflr r12
mtctr r12
mtlr r0
......@@ -1413,6 +1415,7 @@ livepatch_handler:
#else
_GLOBAL_TOC(_mcount)
EXPORT_SYMBOL(_mcount)
/* Taken from output of objdump from lib64/glibc */
mflr r3
ld r11, 0(r1)
......
......@@ -16,6 +16,7 @@
#include <asm/ppc_asm.h>
#include <asm/asm-compat.h>
#include <asm/asm-offsets.h>
#include <asm/export.h>
#ifndef CONFIG_PPC64
/* epapr_ev_idle() was derived from e500_idle() */
......@@ -53,3 +54,4 @@ epapr_hypercall_start:
nop
nop
blr
EXPORT_SYMBOL(epapr_hypercall_start)
......@@ -24,6 +24,7 @@
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/export.h>
#ifdef CONFIG_VSX
#define __REST_32FPVSRS(n,c,base) \
......@@ -59,6 +60,7 @@ _GLOBAL(load_fp_state)
MTFSF_L(fr0)
REST_32FPVSRS(0, R4, R3)
blr
EXPORT_SYMBOL(load_fp_state)
/*
* Store FP state into memory, including FPSCR
......@@ -69,6 +71,7 @@ _GLOBAL(store_fp_state)
mffs fr0
stfd fr0,FPSTATE_FPSCR(r3)
blr
EXPORT_SYMBOL(store_fp_state)
/*
* This task wants to use the FPU now.
......
......@@ -34,6 +34,7 @@
#include <asm/ptrace.h>
#include <asm/bug.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/export.h>
/* 601 only have IBAT; cr0.eq is set on 601 when using this macro */
#define LOAD_BAT(n, reg, RA, RB) \
......@@ -738,6 +739,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
.globl mol_trampoline
.set mol_trampoline, i0x2f00
EXPORT_SYMBOL(mol_trampoline)
. = 0x3000
......@@ -1045,6 +1047,7 @@ _ENTRY(switch_mmu_context)
4: trap
EMIT_BUG_ENTRY 4b,__FILE__,__LINE__,0
blr
EXPORT_SYMBOL(switch_mmu_context)
/*
* An undocumented "feature" of 604e requires that the v bit
......@@ -1272,6 +1275,7 @@ sdata:
.globl empty_zero_page
empty_zero_page:
.space 4096
EXPORT_SYMBOL(empty_zero_page)
.globl swapper_pg_dir
swapper_pg_dir:
......@@ -1285,6 +1289,7 @@ intercept_table:
.long 0, 0, 0, 0, 0, 0, 0, 0
.long 0, 0, 0, 0, 0, 0, 0, 0
.long 0, 0, 0, 0, 0, 0, 0, 0
EXPORT_SYMBOL(intercept_table)
/* Room for two PTE pointers, usually the kernel and current user pointers
* to their respective root page table.
......
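head_32.S shows that the assembler EXPORT_SYMBOL works for data just as well as for code: mol_trampoline, empty_zero_page and intercept_table are exported right where they are laid out. A hedged C analogue of the data case:

	#include <linux/export.h>
	#include <asm/page.h>

	/* Define the object, then export it on the next line. */
	char empty_zero_page[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
	EXPORT_SYMBOL(empty_zero_page);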
......@@ -41,6 +41,7 @@
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/export.h>
/* As with the other PowerPC ports, it is expected that when code
* execution begins here, the following registers contain valid, yet
......@@ -971,6 +972,7 @@ sdata:
.globl empty_zero_page
empty_zero_page:
.space 4096
EXPORT_SYMBOL(empty_zero_page)
.globl swapper_pg_dir
swapper_pg_dir:
.space PGD_TABLE_SIZE
......
......@@ -39,6 +39,7 @@
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/synch.h>
#include <asm/export.h>
#include "head_booke.h"
......@@ -1254,6 +1255,7 @@ sdata:
.globl empty_zero_page
empty_zero_page:
.space PAGE_SIZE
EXPORT_SYMBOL(empty_zero_page)
/*
* To support >32-bit physical addresses, we use an 8KB pgdir.
......
......@@ -43,6 +43,7 @@
#include <asm/hw_irq.h>
#include <asm/cputhreads.h>
#include <asm/ppc-opcode.h>
#include <asm/export.h>
/* The physical memory is laid out such that the secondary processor
* spin code sits at 0x0000...0x00ff. On server, the vectors follow
......@@ -1002,3 +1003,4 @@ swapper_pg_dir:
.globl empty_zero_page
empty_zero_page:
.space PAGE_SIZE
EXPORT_SYMBOL(empty_zero_page)
......@@ -31,6 +31,7 @@
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/fixmap.h>
#include <asm/export.h>
/* Macro to make the code more readable. */
#ifdef CONFIG_8xx_CPU6
......@@ -884,6 +885,7 @@ sdata:
.align PAGE_SHIFT
empty_zero_page:
.space PAGE_SIZE
EXPORT_SYMBOL(empty_zero_page)
.globl swapper_pg_dir
swapper_pg_dir:
......
......@@ -42,6 +42,7 @@
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/ptrace.h>
#include <asm/export.h>
#include "head_booke.h"
/* As with the other PowerPC ports, it is expected that when code
......@@ -1223,6 +1224,7 @@ sdata:
.globl empty_zero_page
empty_zero_page:
.space 4096
EXPORT_SYMBOL(empty_zero_page)
.globl swapper_pg_dir
swapper_pg_dir:
.space PGD_TABLE_SIZE
......
......@@ -18,6 +18,7 @@
#include <asm/unistd.h>
#include <asm/asm-compat.h>
#include <asm/asm-offsets.h>
#include <asm/export.h>
.text
......@@ -118,3 +119,4 @@ _GLOBAL(longjmp)
_GLOBAL(current_stack_pointer)
PPC_LL r3,0(r1)
blr
EXPORT_SYMBOL(current_stack_pointer)
......@@ -33,6 +33,7 @@
#include <asm/kexec.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/export.h>
.text
......@@ -319,6 +320,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE)
#endif /* CONFIG_4xx */
isync
blr
EXPORT_SYMBOL(flush_instruction_cache)
#endif /* CONFIG_PPC_8xx */
/*
......@@ -359,6 +361,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
isync
blr
_ASM_NOKPROBE_SYMBOL(flush_icache_range)
EXPORT_SYMBOL(flush_icache_range)
/*
* Flush a particular page from the data cache to RAM.
......@@ -497,6 +500,7 @@ _GLOBAL(copy_page)
li r0,MAX_COPY_PREFETCH
li r11,4
b 2b
EXPORT_SYMBOL(copy_page)
/*
* Extended precision shifts.
......@@ -524,6 +528,7 @@ _GLOBAL(__ashrdi3)
sraw r3,r3,r5 # MSW = MSW >> count
or r4,r4,r7 # LSW |= t2
blr
EXPORT_SYMBOL(__ashrdi3)
_GLOBAL(__ashldi3)
subfic r6,r5,32
......@@ -535,6 +540,7 @@ _GLOBAL(__ashldi3)
slw r4,r4,r5 # LSW = LSW << count
or r3,r3,r7 # MSW |= t2
blr
EXPORT_SYMBOL(__ashldi3)
_GLOBAL(__lshrdi3)
subfic r6,r5,32
......@@ -546,6 +552,7 @@ _GLOBAL(__lshrdi3)
srw r3,r3,r5 # MSW = MSW >> count
or r4,r4,r7 # LSW |= t2
blr
EXPORT_SYMBOL(__lshrdi3)
/*
* 64-bit comparison: __cmpdi2(s64 a, s64 b)
......@@ -561,6 +568,7 @@ _GLOBAL(__cmpdi2)
bltlr
li r3,2
blr
EXPORT_SYMBOL(__cmpdi2)
/*
* 64-bit comparison: __ucmpdi2(u64 a, u64 b)
* Returns 0 if a < b, 1 if a == b, 2 if a > b.
......@@ -575,6 +583,7 @@ _GLOBAL(__ucmpdi2)
bltlr
li r3,2
blr
EXPORT_SYMBOL(__ucmpdi2)
_GLOBAL(__bswapdi2)
rotlwi r9,r4,8
......@@ -586,6 +595,7 @@ _GLOBAL(__bswapdi2)
mr r3,r9
mr r4,r10
blr
EXPORT_SYMBOL(__bswapdi2)
#ifdef CONFIG_SMP
_GLOBAL(start_secondary_resume)
......
......@@ -27,6 +27,7 @@
#include <asm/kexec.h>
#include <asm/ptrace.h>
#include <asm/mmu.h>
#include <asm/export.h>
.text
......@@ -110,6 +111,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
isync
blr
_ASM_NOKPROBE_SYMBOL(flush_icache_range)
EXPORT_SYMBOL(flush_icache_range)
/*
* Like above, but only do the D-cache.
......@@ -140,6 +142,7 @@ _GLOBAL(flush_dcache_range)
bdnz 0b
sync
blr
EXPORT_SYMBOL(flush_dcache_range)
/*
* Like above, but works on non-mapped physical addresses.
......@@ -243,6 +246,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
blr
_GLOBAL(__bswapdi2)
EXPORT_SYMBOL(__bswapdi2)
srdi r8,r3,32
rlwinm r7,r3,8,0xffffffff
rlwimi r7,r3,24,0,7
......
......@@ -56,6 +56,7 @@ static DECLARE_BITMAP(phb_bitmap, MAX_PHBS);
/* ISA Memory physical address */
resource_size_t isa_mem_base;
EXPORT_SYMBOL(isa_mem_base);
static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
......
......@@ -32,6 +32,8 @@
unsigned long isa_io_base = 0;
unsigned long pci_dram_offset = 0;
int pcibios_assign_bus_offset = 1;
EXPORT_SYMBOL(isa_io_base);
EXPORT_SYMBOL(pci_dram_offset);
void pcibios_make_OF_bus_map(void);
......
#include <linux/ftrace.h>
#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/switch_to.h>
#include <asm/cacheflush.h>
#include <asm/epapr_hcalls.h>
#ifdef CONFIG_PPC64
EXPORT_SYMBOL(flush_dcache_range);
#endif
EXPORT_SYMBOL(flush_icache_range);
EXPORT_SYMBOL(empty_zero_page);
long long __bswapdi2(long long);
EXPORT_SYMBOL(__bswapdi2);
#ifdef CONFIG_FUNCTION_TRACER
EXPORT_SYMBOL(_mcount);
#endif
#ifdef CONFIG_PPC_FPU
EXPORT_SYMBOL(load_fp_state);
EXPORT_SYMBOL(store_fp_state);
#endif
#ifdef CONFIG_ALTIVEC
EXPORT_SYMBOL(load_vr_state);
EXPORT_SYMBOL(store_vr_state);
#endif
#ifdef CONFIG_EPAPR_PARAVIRT
EXPORT_SYMBOL(epapr_hypercall_start);
#endif
EXPORT_SYMBOL(current_stack_pointer);
#include <linux/export.h>
#include <linux/smp.h>
#include <asm/page.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/hw_irq.h>
#include <asm/time.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/dcr.h>
EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
EXPORT_SYMBOL(DMA_MODE_READ);
EXPORT_SYMBOL(DMA_MODE_WRITE);
#if defined(CONFIG_PCI)
EXPORT_SYMBOL(isa_io_base);
EXPORT_SYMBOL(isa_mem_base);
EXPORT_SYMBOL(pci_dram_offset);
#endif
#ifdef CONFIG_SMP
EXPORT_SYMBOL(smp_hw_index);
#endif
long long __ashrdi3(long long, int);
long long __ashldi3(long long, int);
long long __lshrdi3(long long, int);
int __ucmpdi2(unsigned long long, unsigned long long);
int __cmpdi2(long long, long long);
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(__ucmpdi2);
EXPORT_SYMBOL(__cmpdi2);
EXPORT_SYMBOL(timer_interrupt);
EXPORT_SYMBOL(tb_ticks_per_jiffy);
EXPORT_SYMBOL(switch_mmu_context);
#ifdef CONFIG_PPC_STD_MMU_32
extern long mol_trampoline;
EXPORT_SYMBOL(mol_trampoline); /* For MOL */
EXPORT_SYMBOL(flush_hash_pages); /* For MOL */
#ifdef CONFIG_SMP
extern int mmu_hash_lock;
EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */
#endif /* CONFIG_SMP */
extern long *intercept_table;
EXPORT_SYMBOL(intercept_table);
#endif /* CONFIG_PPC_STD_MMU_32 */
#ifdef CONFIG_PPC_DCR_NATIVE
EXPORT_SYMBOL(__mtdcr);
EXPORT_SYMBOL(__mfdcr);
#endif
EXPORT_SYMBOL(flush_instruction_cache);
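Both ppc_ksyms.c and ppc_ksyms_32.c disappear; the C-side exports they carried move next to the definitions as well, as the hunks that follow show. The resulting pattern, sketched with the variables visible below:

	#include <linux/export.h>

	unsigned long ISA_DMA_THRESHOLD;
	unsigned int DMA_MODE_READ;

	EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
	EXPORT_SYMBOL(DMA_MODE_READ);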
......@@ -16,6 +16,7 @@
#include <linux/cpu.h>
#include <linux/console.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <asm/io.h>
#include <asm/prom.h>
......@@ -47,11 +48,16 @@ int boot_cpuid_phys;
EXPORT_SYMBOL_GPL(boot_cpuid_phys);
int smp_hw_index[NR_CPUS];
EXPORT_SYMBOL(smp_hw_index);
unsigned long ISA_DMA_THRESHOLD;
unsigned int DMA_MODE_READ;
unsigned int DMA_MODE_WRITE;
EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
EXPORT_SYMBOL(DMA_MODE_READ);
EXPORT_SYMBOL(DMA_MODE_WRITE);
/*
* These are used in binfmt_elf.c to put aux entries on the stack
* for each elf executable being started.
......
......@@ -596,6 +596,7 @@ void timer_interrupt(struct pt_regs * regs)
irq_exit();
set_irq_regs(old_regs);
}
EXPORT_SYMBOL(timer_interrupt);
/*
* Hypervisor decrementer interrupts shouldn't occur but are sometimes
......
......@@ -6,6 +6,7 @@
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/export.h>
/*
* Load state from memory into VMX registers including VSCR.
......@@ -17,6 +18,7 @@ _GLOBAL(load_vr_state)
mtvscr v0
REST_32VRS(0,r4,r3)
blr
EXPORT_SYMBOL(load_vr_state)
/*
* Store VMX state into memory, including VSCR.
......@@ -28,6 +30,7 @@ _GLOBAL(store_vr_state)
li r4, VRSTATE_VSCR
stvx v0, r4, r3
blr
EXPORT_SYMBOL(store_vr_state)
/*
* Disable VMX for the task which had it previously,
......
......@@ -9,7 +9,7 @@ ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
CFLAGS_REMOVE_code-patching.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_feature-fixups.o = $(CC_FLAGS_FTRACE)
obj-y += string.o alloc.o crtsavres.o ppc_ksyms.o code-patching.o \
obj-y += string.o alloc.o crtsavres.o code-patching.o \
feature-fixups.o
obj-$(CONFIG_PPC32) += div64.o copy_32.o
......
......@@ -17,6 +17,7 @@
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/ppc_asm.h>
#include <asm/export.h>
.text
......@@ -68,6 +69,7 @@ _GLOBAL(__csum_partial)
adde r5,r5,r0
5: addze r3,r5 /* add in final carry */
blr
EXPORT_SYMBOL(__csum_partial)
/*
* Computes the checksum of a memory block at src, length len,
......@@ -297,3 +299,4 @@ dst_error:
.long 41b,dst_error
.long 50b,src_error
.long 51b,dst_error
EXPORT_SYMBOL(csum_partial_copy_generic)
......@@ -16,6 +16,7 @@
#include <asm/processor.h>
#include <asm/errno.h>
#include <asm/ppc_asm.h>
#include <asm/export.h>
/*
* Computes the checksum of a memory block at buff, length len,
......@@ -176,6 +177,7 @@ _GLOBAL(__csum_partial)
add r3,r4,r0
srdi r3,r3,32
blr
EXPORT_SYMBOL(__csum_partial)
.macro srcnr
......@@ -430,3 +432,4 @@ dstnr; stb r6,0(r4)
li r6,-EFAULT
stw r6,0(r8)
blr
EXPORT_SYMBOL(csum_partial_copy_generic)
......@@ -12,6 +12,7 @@
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/ppc_asm.h>
#include <asm/export.h>
#define COPY_16_BYTES \
lwz r7,4(r4); \
......@@ -92,6 +93,7 @@ _GLOBAL(memset)
subf r6,r0,r6
cmplwi 0,r4,0
bne 2f /* Use normal procedure if r4 is not zero */
EXPORT_SYMBOL(memset)
_GLOBAL(memset_nocache_branch)
b 2f /* Skip optimised block until cache is enabled */
......@@ -216,6 +218,8 @@ _GLOBAL(memcpy)
stbu r0,1(r6)
bdnz 40b
65: blr
EXPORT_SYMBOL(memcpy)
EXPORT_SYMBOL(memmove)
generic_memcpy:
srwi. r7,r5,3
......@@ -507,3 +511,4 @@ _GLOBAL(__copy_tofrom_user)
.long 112b,120b
.long 114b,120b
.text
EXPORT_SYMBOL(__copy_tofrom_user)
......@@ -10,6 +10,7 @@
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/export.h>
.section ".toc","aw"
PPC64_CACHES:
......@@ -110,3 +111,4 @@ END_FTR_SECTION_IFSET(CPU_FTR_CP_USE_DCBTZ)
std r11,120(r3)
std r12,128(r3)
blr
EXPORT_SYMBOL(copy_page)
......@@ -8,6 +8,7 @@
*/
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/export.h>
#ifdef __BIG_ENDIAN__
#define sLd sld /* Shift towards low-numbered address. */
......@@ -671,3 +672,4 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
.llong 89b,100b
.llong 90b,100b
.llong 91b,100b
EXPORT_SYMBOL(__copy_tofrom_user)
......@@ -19,6 +19,7 @@
*/
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/export.h>
/* Note: This code relies on -mminimal-toc */
......@@ -32,6 +33,7 @@ FTR_SECTION_ELSE
clrldi r3,r3,64-8
blr
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
EXPORT_SYMBOL(__arch_hweight8)
_GLOBAL(__arch_hweight16)
BEGIN_FTR_SECTION
......@@ -54,6 +56,7 @@ FTR_SECTION_ELSE
blr
ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 50)
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
EXPORT_SYMBOL(__arch_hweight16)
_GLOBAL(__arch_hweight32)
BEGIN_FTR_SECTION
......@@ -79,6 +82,7 @@ FTR_SECTION_ELSE
blr
ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 51)
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
EXPORT_SYMBOL(__arch_hweight32)
_GLOBAL(__arch_hweight64)
BEGIN_FTR_SECTION
......@@ -108,3 +112,4 @@ FTR_SECTION_ELSE
blr
ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 52)
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
EXPORT_SYMBOL(__arch_hweight64)
......@@ -11,6 +11,7 @@
#include <asm/processor.h>
#include <asm/errno.h>
#include <asm/ppc_asm.h>
#include <asm/export.h>
_GLOBAL(memset)
neg r0,r3
......@@ -77,6 +78,7 @@ _GLOBAL(memset)
10: bflr 31
stb r4,0(r6)
blr
EXPORT_SYMBOL(memset)
_GLOBAL_TOC(memmove)
cmplw 0,r3,r4
......@@ -119,3 +121,4 @@ _GLOBAL(backwards_memcpy)
beq 2b
mtctr r7
b 1b
EXPORT_SYMBOL(memmove)
......@@ -8,6 +8,7 @@
* 2 of the License, or (at your option) any later version.
*/
#include <asm/ppc_asm.h>
#include <asm/export.h>
#define off8 r6
#define off16 r7
......@@ -231,3 +232,4 @@ _GLOBAL(memcmp)
ld r28,-32(r1)
ld r27,-40(r1)
blr
EXPORT_SYMBOL(memcmp)
......@@ -8,6 +8,7 @@
*/
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/export.h>
.align 7
_GLOBAL_TOC(memcpy)
......@@ -219,3 +220,4 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
4: ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) /* return dest pointer */
blr
#endif
EXPORT_SYMBOL(memcpy)
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <net/checksum.h>
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memchr);
EXPORT_SYMBOL(strncpy);
EXPORT_SYMBOL(strncmp);
#ifndef CONFIG_GENERIC_CSUM
EXPORT_SYMBOL(__csum_partial);
EXPORT_SYMBOL(csum_partial_copy_generic);
#endif
EXPORT_SYMBOL(__copy_tofrom_user);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(copy_page);
#ifdef CONFIG_PPC64
EXPORT_SYMBOL(__arch_hweight8);
EXPORT_SYMBOL(__arch_hweight16);
EXPORT_SYMBOL(__arch_hweight32);
EXPORT_SYMBOL(__arch_hweight64);
#endif
......@@ -11,6 +11,7 @@
#include <asm/processor.h>
#include <asm/errno.h>
#include <asm/ppc_asm.h>
#include <asm/export.h>
.section __ex_table,"a"
PPC_LONG_ALIGN
......@@ -36,6 +37,7 @@ _GLOBAL(strncpy)
2: stbu r0,1(r6) /* clear it out if so */
bdnz 2b
blr
EXPORT_SYMBOL(strncpy)
_GLOBAL(strncmp)
PPC_LCMPI 0,r5,0
......@@ -53,6 +55,7 @@ _GLOBAL(strncmp)
blr
2: li r3,0
blr
EXPORT_SYMBOL(strncmp)
#ifdef CONFIG_PPC32
_GLOBAL(memcmp)
......@@ -68,6 +71,7 @@ _GLOBAL(memcmp)
blr
2: li r3,0
blr
EXPORT_SYMBOL(memcmp)
#endif
_GLOBAL(memchr)
......@@ -82,6 +86,7 @@ _GLOBAL(memchr)
beqlr
2: li r3,0
blr
EXPORT_SYMBOL(memchr)
#ifdef CONFIG_PPC32
_GLOBAL(__clear_user)
......@@ -125,4 +130,5 @@ _GLOBAL(__clear_user)
PPC_LONG 1b,91b
PPC_LONG 8b,92b
.text
EXPORT_SYMBOL(__clear_user)
#endif
......@@ -20,6 +20,7 @@
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/export.h>
.section ".toc","aw"
PPC64_CACHES:
......@@ -200,3 +201,4 @@ err1; dcbz r0,r3
cmpdi r4,32
blt .Lshort_clear
b .Lmedium_clear
EXPORT_SYMBOL(__clear_user)
......@@ -26,6 +26,7 @@
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/export.h>
#ifdef CONFIG_SMP
.section .bss
......@@ -33,6 +34,7 @@
.globl mmu_hash_lock
mmu_hash_lock:
.space 4
EXPORT_SYMBOL(mmu_hash_lock)
#endif /* CONFIG_SMP */
/*
......@@ -575,6 +577,7 @@ _GLOBAL(flush_hash_pages)
rlwinm r8,r8,0,31,29 /* clear HASHPTE bit */
stwcx. r8,0,r5 /* update the pte */
bne- 33b
EXPORT_SYMBOL(flush_hash_pages)
/* Get the address of the primary PTE group in the hash table (r3) */
_GLOBAL(flush_hash_patch_A)
......
......@@ -30,6 +30,7 @@ bad_relocs=$(
# On PPC64:
# R_PPC64_RELATIVE, R_PPC64_NONE
# R_PPC64_ADDR64 mach_<name>
# R_PPC64_ADDR64 __crc_<name>
# On PPC:
# R_PPC_RELATIVE, R_PPC_ADDR16_HI,
# R_PPC_ADDR16_HA,R_PPC_ADDR16_LO,
......@@ -41,7 +42,8 @@ R_PPC_ADDR16_HI
R_PPC_ADDR16_HA
R_PPC_RELATIVE
R_PPC_NONE' |
grep -E -v '\<R_PPC64_ADDR64[[:space:]]+mach_'
grep -E -v '\<R_PPC64_ADDR64[[:space:]]+mach_' |
grep -E -v '\<R_PPC64_ADDR64[[:space:]]+__crc_'
)
if [ -z "$bad_relocs" ]; then
......
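The relocs_check.sh change whitelists R_PPC64_ADDR64 relocations against __crc_<name> symbols. With CONFIG_MODVERSIONS every export is paired with a __crc_<name> checksum symbol, and for the assembler exports those CRCs can be left unresolved, so they would otherwise trip the relocation check. On the consumer side, a module records the CRC it was built against roughly like this (simplified from include/linux/module.h):

	struct modversion_info {
		unsigned long crc;			/* checksum the module was built with */
		char name[64 - sizeof(unsigned long)];	/* MODULE_NAME_LEN */
	};

At load time each recorded crc is compared with the kernel's __crc_<name> value before the import is resolved.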
......@@ -12,6 +12,7 @@
#include <asm/ppc_asm.h>
#include <asm/processor.h>
#include <asm/bug.h>
#include <asm/export.h>
#define DCR_ACCESS_PROLOG(table) \
cmpli cr0,r3,1024; \
......@@ -28,9 +29,11 @@
_GLOBAL(__mfdcr)
DCR_ACCESS_PROLOG(__mfdcr_table)
EXPORT_SYMBOL(__mfdcr)
_GLOBAL(__mtdcr)
DCR_ACCESS_PROLOG(__mtdcr_table)
EXPORT_SYMBOL(__mtdcr)
__mfdcr_table:
mfdcr r3,0; blr
......
generic-y += clkdev.h
generic-y += export.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
......
......@@ -61,7 +61,7 @@ obj-y += entry.o reipl.o relocate_kernel.o
extra-y += head.o head64.o vmlinux.lds
obj-$(CONFIG_MODULES) += s390_ksyms.o module.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SCHED_TOPOLOGY) += topology.o
obj-$(CONFIG_HIBERNATION) += suspend.o swsusp.o
......
......@@ -23,6 +23,7 @@
#include <asm/vx-insn.h>
#include <asm/setup.h>
#include <asm/nmi.h>
#include <asm/export.h>
__PT_R0 = __PT_GPRS
__PT_R1 = __PT_GPRS + 8
......@@ -259,6 +260,8 @@ sie_exit:
EX_TABLE(.Lrewind_pad,.Lsie_fault)
EX_TABLE(sie_exit,.Lsie_fault)
EXPORT_SYMBOL(sie64a)
EXPORT_SYMBOL(sie_exit)
#endif
/*
......@@ -825,6 +828,9 @@ ENTRY(save_fpu_regs)
oi __LC_CPU_FLAGS+7,_CIF_FPU
br %r14
.Lsave_fpu_regs_end:
#if IS_ENABLED(CONFIG_KVM)
EXPORT_SYMBOL(save_fpu_regs)
#endif
/*
* Load floating-point controls and floating-point or vector registers.
......
......@@ -9,6 +9,7 @@
#include <asm/asm-offsets.h>
#include <asm/ftrace.h>
#include <asm/ptrace.h>
#include <asm/export.h>
.section .kprobes.text, "ax"
......@@ -23,6 +24,8 @@ ENTRY(ftrace_stub)
ENTRY(_mcount)
br %r14
EXPORT_SYMBOL(_mcount)
ENTRY(ftrace_caller)
.globl ftrace_regs_caller
.set ftrace_regs_caller,ftrace_caller
......
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <asm/fpu/api.h>
#include <asm/ftrace.h>
#ifdef CONFIG_FUNCTION_TRACER
EXPORT_SYMBOL(_mcount);
#endif
#if IS_ENABLED(CONFIG_KVM)
EXPORT_SYMBOL(sie64a);
EXPORT_SYMBOL(sie_exit);
EXPORT_SYMBOL(save_fpu_regs);
#endif
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
......@@ -5,6 +5,7 @@
*/
#include <linux/linkage.h>
#include <asm/export.h>
/*
* memset implementation
......@@ -60,6 +61,7 @@ ENTRY(memset)
xc 0(1,%r1),0(%r1)
.Lmemset_mvc:
mvc 1(1,%r1),0(%r1)
EXPORT_SYMBOL(memset)
/*
* memcpy implementation
......@@ -86,3 +88,4 @@ ENTRY(memcpy)
j .Lmemcpy_rest
.Lmemcpy_mvc:
mvc 0(1,%r1),0(%r3)
EXPORT_SYMBOL(memcpy)
......@@ -6,6 +6,7 @@ generic-y += cputime.h
generic-y += div64.h
generic-y += emergency-restart.h
generic-y += exec.h
generic-y += export.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += linkage.h
......
......@@ -5,4 +5,38 @@
#else
#include <asm/string_32.h>
#endif
/* First the mem*() things. */
#define __HAVE_ARCH_MEMMOVE
void *memmove(void *, const void *, __kernel_size_t);
#define __HAVE_ARCH_MEMCPY
#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
#define __HAVE_ARCH_MEMSET
#define memset(s, c, count) __builtin_memset(s, c, count)
#define __HAVE_ARCH_MEMSCAN
#define memscan(__arg0, __char, __arg2) \
({ \
void *__memscan_zero(void *, size_t); \
void *__memscan_generic(void *, int, size_t); \
void *__retval, *__addr = (__arg0); \
size_t __size = (__arg2); \
\
if(__builtin_constant_p(__char) && !(__char)) \
__retval = __memscan_zero(__addr, __size); \
else \
__retval = __memscan_generic(__addr, (__char), __size); \
\
__retval; \
})
#define __HAVE_ARCH_MEMCMP
int memcmp(const void *,const void *,__kernel_size_t);
#define __HAVE_ARCH_STRNCMP
int strncmp(const char *, const char *, __kernel_size_t);
#endif
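The unified sparc string.h keeps the memscan() dispatch macro: a compile-time-constant zero pattern is routed to the specialised __memscan_zero, everything else to __memscan_generic. A hedged usage sketch (function names hypothetical):

	#include <linux/string.h>

	static void *find_nul(void *buf, size_t len)
	{
		return memscan(buf, 0, len);	/* constant 0 -> __memscan_zero */
	}

	static void *find_byte(void *buf, int c, size_t len)
	{
		return memscan(buf, c, len);	/* variable c -> __memscan_generic */
	}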
......@@ -11,60 +11,4 @@
#include <asm/page.h>
/* Really, userland/ksyms should not see any of this stuff. */
#ifdef __KERNEL__
void __memmove(void *,const void *,__kernel_size_t);
#ifndef EXPORT_SYMTAB_STROPS
/* First the mem*() things. */
#define __HAVE_ARCH_MEMMOVE
#undef memmove
#define memmove(_to, _from, _n) \
({ \
void *_t = (_to); \
__memmove(_t, (_from), (_n)); \
_t; \
})
#define __HAVE_ARCH_MEMCPY
#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
#define __HAVE_ARCH_MEMSET
#define memset(s, c, count) __builtin_memset(s, c, count)
#define __HAVE_ARCH_MEMSCAN
#undef memscan
#define memscan(__arg0, __char, __arg2) \
({ \
void *__memscan_zero(void *, size_t); \
void *__memscan_generic(void *, int, size_t); \
void *__retval, *__addr = (__arg0); \
size_t __size = (__arg2); \
\
if(__builtin_constant_p(__char) && !(__char)) \
__retval = __memscan_zero(__addr, __size); \
else \
__retval = __memscan_generic(__addr, (__char), __size); \
\
__retval; \
})
#define __HAVE_ARCH_MEMCMP
int memcmp(const void *,const void *,__kernel_size_t);
/* Now the str*() stuff... */
#define __HAVE_ARCH_STRLEN
__kernel_size_t strlen(const char *);
#define __HAVE_ARCH_STRNCMP
int strncmp(const char *, const char *, __kernel_size_t);
#endif /* !EXPORT_SYMTAB_STROPS */
#endif /* __KERNEL__ */
#endif /* !(__SPARC_STRING_H__) */
......@@ -9,54 +9,10 @@
#ifndef __SPARC64_STRING_H__
#define __SPARC64_STRING_H__
/* Really, userland/ksyms should not see any of this stuff. */
#ifdef __KERNEL__
#include <asm/asi.h>
#ifndef EXPORT_SYMTAB_STROPS
/* First the mem*() things. */
#define __HAVE_ARCH_MEMMOVE
void *memmove(void *, const void *, __kernel_size_t);
#define __HAVE_ARCH_MEMCPY
#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
#define __HAVE_ARCH_MEMSET
#define memset(s, c, count) __builtin_memset(s, c, count)
#define __HAVE_ARCH_MEMSCAN
#undef memscan
#define memscan(__arg0, __char, __arg2) \
({ \
void *__memscan_zero(void *, size_t); \
void *__memscan_generic(void *, int, size_t); \
void *__retval, *__addr = (__arg0); \
size_t __size = (__arg2); \
\
if(__builtin_constant_p(__char) && !(__char)) \
__retval = __memscan_zero(__addr, __size); \
else \
__retval = __memscan_generic(__addr, (__char), __size); \
\
__retval; \
})
#define __HAVE_ARCH_MEMCMP
int memcmp(const void *,const void *,__kernel_size_t);
/* Now the str*() stuff... */
#define __HAVE_ARCH_STRLEN
__kernel_size_t strlen(const char *);
#define __HAVE_ARCH_STRNCMP
int strncmp(const char *, const char *, __kernel_size_t);
#endif /* !EXPORT_SYMTAB_STROPS */
#endif /* __KERNEL__ */
#endif /* !(__SPARC64_STRING_H__) */
......@@ -86,7 +86,7 @@ obj-y += auxio_$(BITS).o
obj-$(CONFIG_SUN_PM) += apc.o pmc.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_MODULES) += sparc_ksyms_$(BITS).o
obj-$(CONFIG_MODULES) += sparc_ksyms.o
obj-$(CONFIG_SPARC_LED) += led.o
obj-$(CONFIG_KGDB) += kgdb_$(BITS).o
......
......@@ -29,6 +29,7 @@
#include <asm/unistd.h>
#include <asm/asmmacro.h>
#include <asm/export.h>
#define curptr g6
......@@ -1207,6 +1208,8 @@ delay_continue:
ret
restore
EXPORT_SYMBOL(__udelay)
EXPORT_SYMBOL(__ndelay)
/* Handle a software breakpoint */
/* We have to inform parent that child has stopped */
......
......@@ -24,6 +24,7 @@
#include <asm/thread_info.h> /* TI_UWINMASK */
#include <asm/errno.h>
#include <asm/pgtsrmmu.h> /* SRMMU_PGDIR_SHIFT */
#include <asm/export.h>
.data
/* The following are used with the prom_vector node-ops to figure out
......@@ -60,6 +61,7 @@ sun4e_notsup:
*/
.globl empty_zero_page
empty_zero_page: .skip PAGE_SIZE
EXPORT_SYMBOL(empty_zero_page)
.global root_flags
.global ram_flags
......@@ -813,3 +815,4 @@ lvl14_save:
__ret_efault:
ret
restore %g0, -EFAULT, %o0
EXPORT_SYMBOL(__ret_efault)
......@@ -32,7 +32,8 @@
#include <asm/estate.h>
#include <asm/sfafsr.h>
#include <asm/unistd.h>
#include <asm/export.h>
/* This section from _start to sparc64_boot_end should fit into
* 0x0000000000404000 to 0x0000000000408000.
*/
......@@ -143,6 +144,7 @@ prom_cpu_compatible:
.skip 64
prom_root_node:
.word 0
EXPORT_SYMBOL(prom_root_node)
prom_mmu_ihandle_cache:
.word 0
prom_boot_mapped_pc:
......@@ -158,6 +160,7 @@ is_sun4v:
.word 0
sun4v_chip_type:
.word SUN4V_CHIP_INVALID
EXPORT_SYMBOL(sun4v_chip_type)
1:
rd %pc, %l0
......@@ -920,6 +923,7 @@ swapper_4m_tsb:
.globl prom_tba, tlb_type
prom_tba: .xword 0
tlb_type: .word 0 /* Must NOT end up in BSS */
EXPORT_SYMBOL(tlb_type)
.section ".fixup",#alloc,#execinstr
.globl __ret_efault, __retl_efault, __ret_one, __retl_one
......@@ -927,6 +931,7 @@ ENTRY(__ret_efault)
ret
restore %g0, -EFAULT, %o0
ENDPROC(__ret_efault)
EXPORT_SYMBOL(__ret_efault)
ENTRY(__retl_efault)
retl
......
......@@ -15,6 +15,7 @@ __flushw_user:
2: retl
nop
.size __flushw_user,.-__flushw_user
EXPORT_SYMBOL(__flushw_user)
/* Flush %fp and %i7 to the stack for all register
* windows active inside of the cpu. This allows
......@@ -61,3 +62,4 @@ real_hard_smp_processor_id:
.size hard_smp_processor_id,.-hard_smp_processor_id
#endif
.size real_hard_smp_processor_id,.-real_hard_smp_processor_id
EXPORT_SYMBOL_GPL(real_hard_smp_processor_id)
......@@ -343,6 +343,7 @@ ENTRY(sun4v_mach_set_watchdog)
0: retl
nop
ENDPROC(sun4v_mach_set_watchdog)
EXPORT_SYMBOL(sun4v_mach_set_watchdog)
/* No inputs and does not return. */
ENTRY(sun4v_mach_sir)
......@@ -776,6 +777,7 @@ ENTRY(sun4v_niagara_getperf)
retl
nop
ENDPROC(sun4v_niagara_getperf)
EXPORT_SYMBOL(sun4v_niagara_getperf)
ENTRY(sun4v_niagara_setperf)
mov HV_FAST_SET_PERFREG, %o5
......@@ -783,6 +785,7 @@ ENTRY(sun4v_niagara_setperf)
retl
nop
ENDPROC(sun4v_niagara_setperf)
EXPORT_SYMBOL(sun4v_niagara_setperf)
ENTRY(sun4v_niagara2_getperf)
mov %o0, %o4
......@@ -792,6 +795,7 @@ ENTRY(sun4v_niagara2_getperf)
retl
nop
ENDPROC(sun4v_niagara2_getperf)
EXPORT_SYMBOL(sun4v_niagara2_getperf)
ENTRY(sun4v_niagara2_setperf)
mov HV_FAST_N2_SET_PERFREG, %o5
......@@ -799,6 +803,7 @@ ENTRY(sun4v_niagara2_setperf)
retl
nop
ENDPROC(sun4v_niagara2_setperf)
EXPORT_SYMBOL(sun4v_niagara2_setperf)
ENTRY(sun4v_reboot_data_set)
mov HV_FAST_REBOOT_DATA_SET, %o5
......
......@@ -5,27 +5,8 @@
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/export.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/delay.h>
#include <asm/head.h>
#include <asm/dma.h>
struct poll {
int fd;
short events;
short revents;
};
/* from entry.S */
EXPORT_SYMBOL(__udelay);
EXPORT_SYMBOL(__ndelay);
/* from head_32.S */
EXPORT_SYMBOL(__ret_efault);
EXPORT_SYMBOL(empty_zero_page);
/* Exporting a symbol from /init/main.c */
/* This is needed only for drivers/sbus/char/openprom.c */
EXPORT_SYMBOL(saved_command_line);
/* arch/sparc64/kernel/sparc64_ksyms.c: Sparc64 specific ksyms support.
*
* Copyright (C) 1996, 2007 David S. Miller (davem@davemloft.net)
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
*/
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/bitops.h>
#include <asm/cpudata.h>
#include <asm/uaccess.h>
#include <asm/spitfire.h>
#include <asm/oplib.h>
#include <asm/hypervisor.h>
#include <asm/cacheflush.h>
struct poll {
int fd;
short events;
short revents;
};
/* from helpers.S */
EXPORT_SYMBOL(__flushw_user);
EXPORT_SYMBOL_GPL(real_hard_smp_processor_id);
/* from head_64.S */
EXPORT_SYMBOL(__ret_efault);
EXPORT_SYMBOL(tlb_type);
EXPORT_SYMBOL(sun4v_chip_type);
EXPORT_SYMBOL(prom_root_node);
/* from hvcalls.S */
EXPORT_SYMBOL(sun4v_niagara_getperf);
EXPORT_SYMBOL(sun4v_niagara_setperf);
EXPORT_SYMBOL(sun4v_niagara2_getperf);
EXPORT_SYMBOL(sun4v_niagara2_setperf);
EXPORT_SYMBOL(sun4v_mach_set_watchdog);
/* from hweight.S */
EXPORT_SYMBOL(__arch_hweight8);
EXPORT_SYMBOL(__arch_hweight16);
EXPORT_SYMBOL(__arch_hweight32);
EXPORT_SYMBOL(__arch_hweight64);
/* from ffs_ffz.S */
EXPORT_SYMBOL(ffs);
EXPORT_SYMBOL(__ffs);
/* Exporting a symbol from /init/main.c */
EXPORT_SYMBOL(saved_command_line);
......@@ -43,5 +43,4 @@ lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o xor.o hweight.o ffs.o
obj-$(CONFIG_SPARC64) += iomap.o
obj-$(CONFIG_SPARC32) += atomic32.o ucmpdi2.o
obj-y += ksyms.o
obj-$(CONFIG_SPARC64) += PeeCeeI.o
......@@ -7,6 +7,7 @@
#ifdef __KERNEL__
#include <asm/visasm.h>
#include <asm/asi.h>
#include <asm/export.h>
#define GLOBAL_SPARE g7
#else
#define GLOBAL_SPARE g5
......@@ -567,3 +568,4 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
mov EX_RETVAL(%o4), %o0
.size FUNC_NAME, .-FUNC_NAME
EXPORT_SYMBOL(FUNC_NAME)
......@@ -13,6 +13,7 @@
#include <asm/ptrace.h>
#include <asm/visasm.h>
#include <asm/thread_info.h>
#include <asm/export.h>
/* On entry: %o5=current FPRS value, %g7 is callers address */
/* May clobber %o5, %g1, %g2, %g3, %g7, %icc, %xcc */
......@@ -79,3 +80,4 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3
80: jmpl %g7 + %g0, %g0
nop
ENDPROC(VISenter)
EXPORT_SYMBOL(VISenter)
......@@ -6,6 +6,7 @@
*/
#include <linux/linkage.h>
#include <asm/export.h>
.text
ENTRY(__ashldi3)
......@@ -33,3 +34,4 @@ ENTRY(__ashldi3)
retl
nop
ENDPROC(__ashldi3)
EXPORT_SYMBOL(__ashldi3)
......@@ -6,6 +6,7 @@
*/
#include <linux/linkage.h>
#include <asm/export.h>
.text
ENTRY(__ashrdi3)
......@@ -35,3 +36,4 @@ ENTRY(__ashrdi3)
jmpl %o7 + 8, %g0
nop
ENDPROC(__ashrdi3)
EXPORT_SYMBOL(__ashrdi3)
......@@ -6,6 +6,7 @@
#include <linux/linkage.h>
#include <asm/asi.h>
#include <asm/backoff.h>
#include <asm/export.h>
.text
......@@ -29,6 +30,7 @@ ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
nop; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
ENDPROC(atomic_##op); \
EXPORT_SYMBOL(atomic_##op);
#define ATOMIC_OP_RETURN(op) \
ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
......@@ -42,7 +44,8 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
sra %g1, 0, %o0; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
ENDPROC(atomic_##op##_return);
ENDPROC(atomic_##op##_return); \
EXPORT_SYMBOL(atomic_##op##_return);
#define ATOMIC_FETCH_OP(op) \
ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
......@@ -56,7 +59,8 @@ ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
sra %g1, 0, %o0; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
ENDPROC(atomic_fetch_##op);
ENDPROC(atomic_fetch_##op); \
EXPORT_SYMBOL(atomic_fetch_##op);
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
......@@ -88,6 +92,7 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
nop; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
ENDPROC(atomic64_##op); \
EXPORT_SYMBOL(atomic64_##op);
#define ATOMIC64_OP_RETURN(op) \
ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
......@@ -101,7 +106,8 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
op %g1, %o0, %o0; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
ENDPROC(atomic64_##op##_return);
ENDPROC(atomic64_##op##_return); \
EXPORT_SYMBOL(atomic64_##op##_return);
#define ATOMIC64_FETCH_OP(op) \
ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
......@@ -115,7 +121,8 @@ ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
mov %g1, %o0; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
ENDPROC(atomic64_fetch_##op);
ENDPROC(atomic64_fetch_##op); \
EXPORT_SYMBOL(atomic64_fetch_##op);
#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op)
......@@ -147,3 +154,4 @@ ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
sub %g1, 1, %o0
2: BACKOFF_SPIN(%o2, %o3, 1b)
ENDPROC(atomic64_dec_if_positive)
EXPORT_SYMBOL(atomic64_dec_if_positive)
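In atomic_64.S the export is added inside the generator macros themselves, so every ATOMIC_OP/ATOMIC64_OP instantiation now emits its own EXPORT_SYMBOL and the export list can no longer fall out of sync with the op list, which the hand-maintained table in the deleted ksyms.c further down had to guarantee manually. The same token-pasting idea in C, as a hedged sketch (atomic_t assumed from <linux/atomic.h>):

	#include <linux/export.h>

	#define ATOMIC_OP(op)					\
		void atomic_##op(int i, atomic_t *v);		\
		EXPORT_SYMBOL(atomic_##op);

	ATOMIC_OP(add)		/* declares and exports atomic_add */
	ATOMIC_OP(sub)		/* declares and exports atomic_sub */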
......@@ -6,6 +6,7 @@
#include <linux/linkage.h>
#include <asm/asi.h>
#include <asm/backoff.h>
#include <asm/export.h>
.text
......@@ -29,6 +30,7 @@ ENTRY(test_and_set_bit) /* %o0=nr, %o1=addr */
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
ENDPROC(test_and_set_bit)
EXPORT_SYMBOL(test_and_set_bit)
ENTRY(test_and_clear_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
......@@ -50,6 +52,7 @@ ENTRY(test_and_clear_bit) /* %o0=nr, %o1=addr */
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
ENDPROC(test_and_clear_bit)
EXPORT_SYMBOL(test_and_clear_bit)
ENTRY(test_and_change_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
......@@ -71,6 +74,7 @@ ENTRY(test_and_change_bit) /* %o0=nr, %o1=addr */
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
ENDPROC(test_and_change_bit)
EXPORT_SYMBOL(test_and_change_bit)
ENTRY(set_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
......@@ -90,6 +94,7 @@ ENTRY(set_bit) /* %o0=nr, %o1=addr */
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
ENDPROC(set_bit)
EXPORT_SYMBOL(set_bit)
ENTRY(clear_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
......@@ -109,6 +114,7 @@ ENTRY(clear_bit) /* %o0=nr, %o1=addr */
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
ENDPROC(clear_bit)
EXPORT_SYMBOL(clear_bit)
ENTRY(change_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
......@@ -128,3 +134,4 @@ ENTRY(change_bit) /* %o0=nr, %o1=addr */
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
ENDPROC(change_bit)
EXPORT_SYMBOL(change_bit)
......@@ -6,6 +6,7 @@
#include <linux/linkage.h>
#include <asm/page.h>
#include <asm/export.h>
/* Zero out 64 bytes of memory at (buf + offset).
* Assumes %g1 contains zero.
......@@ -64,6 +65,7 @@ ENTRY(bzero_1page)
retl
nop
ENDPROC(bzero_1page)
EXPORT_SYMBOL(bzero_1page)
ENTRY(__copy_1page)
/* NOTE: If you change the number of insns of this routine, please check
......@@ -87,3 +89,4 @@ ENTRY(__copy_1page)
retl
nop
ENDPROC(__copy_1page)
EXPORT_SYMBOL(__copy_1page)
......@@ -5,6 +5,7 @@
*/
#include <linux/linkage.h>
#include <asm/export.h>
.text
......@@ -78,6 +79,8 @@ __bzero_done:
mov %o3, %o0
ENDPROC(__bzero)
ENDPROC(memset)
EXPORT_SYMBOL(__bzero)
EXPORT_SYMBOL(memset)
#define EX_ST(x,y) \
98: x,y; \
......@@ -143,3 +146,4 @@ __clear_user_done:
retl
clr %o0
ENDPROC(__clear_user)
EXPORT_SYMBOL(__clear_user)
......@@ -14,6 +14,7 @@
*/
#include <asm/errno.h>
#include <asm/export.h>
#define CSUM_BIGCHUNK(buf, offset, sum, t0, t1, t2, t3, t4, t5) \
ldd [buf + offset + 0x00], t0; \
......@@ -104,6 +105,7 @@ csum_partial_fix_alignment:
* buffer of size 0x20. Follow the code path for that case.
*/
.globl csum_partial
EXPORT_SYMBOL(csum_partial)
csum_partial: /* %o0=buf, %o1=len, %o2=sum */
andcc %o0, 0x7, %g0 ! alignment problems?
bne csum_partial_fix_alignment ! yep, handle it
......@@ -335,6 +337,7 @@ cc_dword_align:
*/
.align 8
.globl __csum_partial_copy_sparc_generic
EXPORT_SYMBOL(__csum_partial_copy_sparc_generic)
__csum_partial_copy_sparc_generic:
/* %o0=src, %o1=dest, %g1=len, %g7=sum */
xor %o0, %o1, %o4 ! get changing bits
......
......@@ -13,6 +13,7 @@
* BSD4.4 portable checksum routine
*/
#include <asm/export.h>
.text
csum_partial_fix_alignment:
......@@ -37,6 +38,7 @@ csum_partial_fix_alignment:
.align 32
.globl csum_partial
EXPORT_SYMBOL(csum_partial)
csum_partial: /* %o0=buff, %o1=len, %o2=sum */
prefetch [%o0 + 0x000], #n_reads
clr %o4
......
......@@ -10,6 +10,7 @@
#include <asm/pgtable.h>
#include <asm/spitfire.h>
#include <asm/head.h>
#include <asm/export.h>
/* What we used to do was lock a TLB entry into a specific
* TLB slot, clear the page with interrupts disabled, then
......@@ -26,6 +27,7 @@
.text
.globl _clear_page
EXPORT_SYMBOL(_clear_page)
_clear_page: /* %o0=dest */
ba,pt %xcc, clear_page_common
clr %o4
......@@ -35,6 +37,7 @@ _clear_page: /* %o0=dest */
*/
.align 32
.globl clear_user_page
EXPORT_SYMBOL(clear_user_page)
clear_user_page: /* %o0=dest, %o1=vaddr */
lduw [%g6 + TI_PRE_COUNT], %o2
sethi %hi(PAGE_OFFSET), %g2
......
......@@ -5,6 +5,7 @@
#include <linux/linkage.h>
#include <asm/asi.h>
#include <asm/export.h>
#define XCC xcc
......@@ -90,3 +91,4 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */
retl
clr %o0
ENDPROC(___copy_in_user)
EXPORT_SYMBOL(___copy_in_user)
......@@ -10,6 +10,7 @@
#include <asm/pgtable.h>
#include <asm/spitfire.h>
#include <asm/head.h>
#include <asm/export.h>
/* What we used to do was lock a TLB entry into a specific
* TLB slot, clear the page with interrupts disabled, then
......@@ -44,6 +45,7 @@
.align 32
.globl copy_user_page
.type copy_user_page,#function
EXPORT_SYMBOL(copy_user_page)
copy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */
lduw [%g6 + TI_PRE_COUNT], %o4
sethi %hi(PAGE_OFFSET), %g2
......
......@@ -15,6 +15,7 @@
#include <asm/asmmacro.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/export.h>
/* Work around cpp -rob */
#define ALLOC #alloc
......@@ -119,6 +120,7 @@
__copy_user_begin:
.globl __copy_user
EXPORT_SYMBOL(__copy_user)
dword_align:
andcc %o1, 1, %g0
be 4f
......
......@@ -3,6 +3,8 @@
* Copyright (C) 2005 David S. Miller <davem@davemloft.net>
*/
#include <asm/export.h>
#ifdef __KERNEL__
#define GLOBAL_SPARE %g7
#else
......@@ -63,6 +65,7 @@
add %o5, %o4, %o4
.globl FUNC_NAME
EXPORT_SYMBOL(FUNC_NAME)
FUNC_NAME: /* %o0=src, %o1=dst, %o2=len, %o3=sum */
LOAD(prefetch, %o0 + 0x000, #n_reads)
xor %o0, %o1, %g1
......
......@@ -17,6 +17,7 @@ along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
#include <asm/export.h>
.text
.align 4
.globl __divdi3
......@@ -279,3 +280,4 @@ __divdi3:
.LL81:
ret
restore
EXPORT_SYMBOL(__divdi3)
#include <linux/linkage.h>
#include <asm/export.h>
.register %g2,#scratch
......@@ -65,6 +66,8 @@ ENTRY(__ffs)
add %o2, %g1, %o0
ENDPROC(ffs)
ENDPROC(__ffs)
EXPORT_SYMBOL(__ffs)
EXPORT_SYMBOL(ffs)
.section .popc_6insn_patch, "ax"
.word ffs
......
#include <linux/linkage.h>
#include <asm/export.h>
.text
.align 32
......@@ -7,6 +8,7 @@ ENTRY(__arch_hweight8)
nop
nop
ENDPROC(__arch_hweight8)
EXPORT_SYMBOL(__arch_hweight8)
.section .popc_3insn_patch, "ax"
.word __arch_hweight8
sllx %o0, 64-8, %g1
......@@ -19,6 +21,7 @@ ENTRY(__arch_hweight16)
nop
nop
ENDPROC(__arch_hweight16)
EXPORT_SYMBOL(__arch_hweight16)
.section .popc_3insn_patch, "ax"
.word __arch_hweight16
sllx %o0, 64-16, %g1
......@@ -31,6 +34,7 @@ ENTRY(__arch_hweight32)
nop
nop
ENDPROC(__arch_hweight32)
EXPORT_SYMBOL(__arch_hweight32)
.section .popc_3insn_patch, "ax"
.word __arch_hweight32
sllx %o0, 64-32, %g1
......@@ -43,6 +47,7 @@ ENTRY(__arch_hweight64)
nop
nop
ENDPROC(__arch_hweight64)
EXPORT_SYMBOL(__arch_hweight64)
.section .popc_3insn_patch, "ax"
.word __arch_hweight64
retl
......
#include <linux/linkage.h>
#include <asm/export.h>
.text
ENTRY(ip_fast_csum) /* %o0 = iph, %o1 = ihl */
......@@ -31,3 +32,4 @@ ENTRY(ip_fast_csum) /* %o0 = iph, %o1 = ihl */
retl
and %o2, %o1, %o0
ENDPROC(ip_fast_csum)
EXPORT_SYMBOL(ip_fast_csum)
/*
* Export of symbols defined in assembler
*/
/* Tell string.h we don't want memcpy etc. as cpp defines */
#define EXPORT_SYMTAB_STROPS
#include <linux/module.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/checksum.h>
#include <asm/uaccess.h>
#include <asm/ftrace.h>
/* string functions */
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(strncmp);
/* mem* functions */
extern void *__memscan_zero(void *, size_t);
extern void *__memscan_generic(void *, int, size_t);
extern void *__bzero(void *, size_t);
EXPORT_SYMBOL(memscan);
EXPORT_SYMBOL(__memscan_zero);
EXPORT_SYMBOL(__memscan_generic);
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(__bzero);
/* Networking helper routines. */
EXPORT_SYMBOL(csum_partial);
#ifdef CONFIG_MCOUNT
EXPORT_SYMBOL(_mcount);
#endif
/*
* sparc
*/
#ifdef CONFIG_SPARC32
extern int __ashrdi3(int, int);
extern int __ashldi3(int, int);
extern int __lshrdi3(int, int);
extern int __muldi3(int, int);
extern int __divdi3(int, int);
extern void (*__copy_1page)(void *, const void *);
extern void (*bzero_1page)(void *);
extern void ___rw_read_enter(void);
extern void ___rw_read_try(void);
extern void ___rw_read_exit(void);
extern void ___rw_write_enter(void);
/* Networking helper routines. */
EXPORT_SYMBOL(__csum_partial_copy_sparc_generic);
/* Special internal versions of library functions. */
EXPORT_SYMBOL(__copy_1page);
EXPORT_SYMBOL(__memmove);
EXPORT_SYMBOL(bzero_1page);
/* Moving data to/from/in userspace. */
EXPORT_SYMBOL(__copy_user);
/* Used by asm/spinlock.h */
#ifdef CONFIG_SMP
EXPORT_SYMBOL(___rw_read_enter);
EXPORT_SYMBOL(___rw_read_try);
EXPORT_SYMBOL(___rw_read_exit);
EXPORT_SYMBOL(___rw_write_enter);
#endif
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(__muldi3);
EXPORT_SYMBOL(__divdi3);
#endif
/*
* sparc64
*/
#ifdef CONFIG_SPARC64
/* Networking helper routines. */
EXPORT_SYMBOL(csum_partial_copy_nocheck);
EXPORT_SYMBOL(__csum_partial_copy_from_user);
EXPORT_SYMBOL(__csum_partial_copy_to_user);
EXPORT_SYMBOL(ip_fast_csum);
/* Moving data to/from/in userspace. */
EXPORT_SYMBOL(___copy_to_user);
EXPORT_SYMBOL(___copy_from_user);
EXPORT_SYMBOL(___copy_in_user);
EXPORT_SYMBOL(__clear_user);
/* Atomic counter implementation. */
#define ATOMIC_OP(op) \
EXPORT_SYMBOL(atomic_##op); \
EXPORT_SYMBOL(atomic64_##op);
#define ATOMIC_OP_RETURN(op) \
EXPORT_SYMBOL(atomic_##op##_return); \
EXPORT_SYMBOL(atomic64_##op##_return);
#define ATOMIC_FETCH_OP(op) \
EXPORT_SYMBOL(atomic_fetch_##op); \
EXPORT_SYMBOL(atomic64_fetch_##op);
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
EXPORT_SYMBOL(atomic64_dec_if_positive);
/* Atomic bit operations. */
EXPORT_SYMBOL(test_and_set_bit);
EXPORT_SYMBOL(test_and_clear_bit);
EXPORT_SYMBOL(test_and_change_bit);
EXPORT_SYMBOL(set_bit);
EXPORT_SYMBOL(clear_bit);
EXPORT_SYMBOL(change_bit);
/* Special internal versions of library functions. */
EXPORT_SYMBOL(_clear_page);
EXPORT_SYMBOL(clear_user_page);
EXPORT_SYMBOL(copy_user_page);
/* RAID code needs this */
void VISenter(void);
EXPORT_SYMBOL(VISenter);
extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
unsigned long *);
extern void xor_vis_4(unsigned long, unsigned long *, unsigned long *,
unsigned long *, unsigned long *);
extern void xor_vis_5(unsigned long, unsigned long *, unsigned long *,
unsigned long *, unsigned long *, unsigned long *);
EXPORT_SYMBOL(xor_vis_2);
EXPORT_SYMBOL(xor_vis_3);
EXPORT_SYMBOL(xor_vis_4);
EXPORT_SYMBOL(xor_vis_5);
extern void xor_niagara_2(unsigned long, unsigned long *, unsigned long *);
extern void xor_niagara_3(unsigned long, unsigned long *, unsigned long *,
unsigned long *);
extern void xor_niagara_4(unsigned long, unsigned long *, unsigned long *,
unsigned long *, unsigned long *);
extern void xor_niagara_5(unsigned long, unsigned long *, unsigned long *,
unsigned long *, unsigned long *, unsigned long *);
EXPORT_SYMBOL(xor_niagara_2);
EXPORT_SYMBOL(xor_niagara_3);
EXPORT_SYMBOL(xor_niagara_4);
EXPORT_SYMBOL(xor_niagara_5);
#endif
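One subtlety the deleted ksyms.c had to work around: sparc's string.h turns several mem and str helpers into cpp defines or builtins, which have no address to export, so the file defined EXPORT_SYMTAB_STROPS first to get plain prototypes (see the comment at its top). A hedged sketch of that opt-out:

	/* Opt out of the macro versions before pulling in string.h, so the
	 * names below refer to the real out-of-line assembler symbols. */
	#define EXPORT_SYMTAB_STROPS
	#include <linux/module.h>
	#include <linux/string.h>

	EXPORT_SYMBOL(memcmp);
	EXPORT_SYMBOL(strncmp);

With the exports now attached to the definitions themselves, this workaround goes away along with the file.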
......@@ -10,6 +10,7 @@
#include <asm/psr.h>
#include <asm/smp.h>
#include <asm/spinlock.h>
#include <asm/export.h>
.text
.align 4
......@@ -48,6 +49,7 @@ ___rw_write_enter_spin_on_wlock:
ld [%g1], %g2
.globl ___rw_read_enter
EXPORT_SYMBOL(___rw_read_enter)
___rw_read_enter:
orcc %g2, 0x0, %g0
bne,a ___rw_read_enter_spin_on_wlock
......@@ -59,6 +61,7 @@ ___rw_read_enter:
mov %g4, %o7
.globl ___rw_read_exit
EXPORT_SYMBOL(___rw_read_exit)
___rw_read_exit:
orcc %g2, 0x0, %g0
bne,a ___rw_read_exit_spin_on_wlock
......@@ -70,6 +73,7 @@ ___rw_read_exit:
mov %g4, %o7
.globl ___rw_read_try
EXPORT_SYMBOL(___rw_read_try)
___rw_read_try:
orcc %g2, 0x0, %g0
bne ___rw_read_try_spin_on_wlock
......@@ -81,6 +85,7 @@ ___rw_read_try:
mov %g4, %o7
.globl ___rw_write_enter
EXPORT_SYMBOL(___rw_write_enter)
___rw_write_enter:
orcc %g2, 0x0, %g0
bne ___rw_write_enter_spin_on_wlock
......
#include <linux/linkage.h>
#include <asm/export.h>
ENTRY(__lshrdi3)
cmp %o2, 0
......@@ -25,3 +26,4 @@ ENTRY(__lshrdi3)
retl
nop
ENDPROC(__lshrdi3)
EXPORT_SYMBOL(__lshrdi3)
......@@ -6,6 +6,7 @@
*/
#include <linux/linkage.h>
#include <asm/export.h>
/*
* This is the main variant and is called by C code. GCC's -pg option
......@@ -16,6 +17,7 @@
.align 32
.globl _mcount
.type _mcount,#function
EXPORT_SYMBOL(_mcount)
.globl mcount
.type mcount,#function
_mcount:
......
......@@ -6,6 +6,7 @@
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/export.h>
.text
ENTRY(memcmp)
......@@ -25,3 +26,4 @@ ENTRY(memcmp)
2: retl
mov 0, %o0
ENDPROC(memcmp)
EXPORT_SYMBOL(memcmp)
......@@ -7,6 +7,7 @@
* Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <asm/export.h>
#define FUNC(x) \
.globl x; \
.type x,@function; \
......@@ -58,93 +59,11 @@ x:
stb %t0, [%dst - (offset) - 0x02]; \
stb %t1, [%dst - (offset) - 0x01];
/* Both these macros have to start with exactly the same insn */
#define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
ldd [%src - (offset) - 0x20], %t0; \
ldd [%src - (offset) - 0x18], %t2; \
ldd [%src - (offset) - 0x10], %t4; \
ldd [%src - (offset) - 0x08], %t6; \
st %t0, [%dst - (offset) - 0x20]; \
st %t1, [%dst - (offset) - 0x1c]; \
st %t2, [%dst - (offset) - 0x18]; \
st %t3, [%dst - (offset) - 0x14]; \
st %t4, [%dst - (offset) - 0x10]; \
st %t5, [%dst - (offset) - 0x0c]; \
st %t6, [%dst - (offset) - 0x08]; \
st %t7, [%dst - (offset) - 0x04];
#define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
ldd [%src - (offset) - 0x20], %t0; \
ldd [%src - (offset) - 0x18], %t2; \
ldd [%src - (offset) - 0x10], %t4; \
ldd [%src - (offset) - 0x08], %t6; \
std %t0, [%dst - (offset) - 0x20]; \
std %t2, [%dst - (offset) - 0x18]; \
std %t4, [%dst - (offset) - 0x10]; \
std %t6, [%dst - (offset) - 0x08];
#define RMOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
ldd [%src + (offset) + 0x00], %t0; \
ldd [%src + (offset) + 0x08], %t2; \
st %t0, [%dst + (offset) + 0x00]; \
st %t1, [%dst + (offset) + 0x04]; \
st %t2, [%dst + (offset) + 0x08]; \
st %t3, [%dst + (offset) + 0x0c];
#define RMOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
ldub [%src + (offset) + 0x00], %t0; \
ldub [%src + (offset) + 0x01], %t1; \
stb %t0, [%dst + (offset) + 0x00]; \
stb %t1, [%dst + (offset) + 0x01];
#define SMOVE_CHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \
ldd [%src + (offset) + 0x00], %t0; \
ldd [%src + (offset) + 0x08], %t2; \
srl %t0, shir, %t5; \
srl %t1, shir, %t6; \
sll %t0, shil, %t0; \
or %t5, %prev, %t5; \
sll %t1, shil, %prev; \
or %t6, %t0, %t0; \
srl %t2, shir, %t1; \
srl %t3, shir, %t6; \
sll %t2, shil, %t2; \
or %t1, %prev, %t1; \
std %t4, [%dst + (offset) + (offset2) - 0x04]; \
std %t0, [%dst + (offset) + (offset2) + 0x04]; \
sll %t3, shil, %prev; \
or %t6, %t2, %t4;
#define SMOVE_ALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \
ldd [%src + (offset) + 0x00], %t0; \
ldd [%src + (offset) + 0x08], %t2; \
srl %t0, shir, %t4; \
srl %t1, shir, %t5; \
sll %t0, shil, %t6; \
or %t4, %prev, %t0; \
sll %t1, shil, %prev; \
or %t5, %t6, %t1; \
srl %t2, shir, %t4; \
srl %t3, shir, %t5; \
sll %t2, shil, %t6; \
or %t4, %prev, %t2; \
sll %t3, shil, %prev; \
or %t5, %t6, %t3; \
std %t0, [%dst + (offset) + (offset2) + 0x00]; \
std %t2, [%dst + (offset) + (offset2) + 0x08];
.text
.align 4
0:
retl
nop ! Only bcopy returns here and it returns void...
#ifdef __KERNEL__
FUNC(amemmove)
FUNC(__memmove)
#endif
FUNC(memmove)
EXPORT_SYMBOL(memmove)
cmp %o0, %o1
mov %o0, %g7
bleu 9f
......@@ -202,6 +121,7 @@ FUNC(memmove)
add %o0, 2, %o0
FUNC(memcpy) /* %o0=dst %o1=src %o2=len */
EXPORT_SYMBOL(memcpy)
sub %o0, %o1, %o4
mov %o0, %g7
......
......@@ -5,6 +5,7 @@
*/
#include <linux/linkage.h>
#include <asm/export.h>
.text
ENTRY(memmove) /* o0=dst o1=src o2=len */
......@@ -57,3 +58,4 @@ ENTRY(memmove) /* o0=dst o1=src o2=len */
stb %g7, [%o0 - 0x1]
ba,a,pt %xcc, 99b
ENDPROC(memmove)
EXPORT_SYMBOL(memmove)
......@@ -4,6 +4,8 @@
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
#include <asm/export.h>
/* In essence, this is just a fancy strlen. */
#define LO_MAGIC 0x01010101
......@@ -13,6 +15,8 @@
.align 4
.globl __memscan_zero, __memscan_generic
.globl memscan
EXPORT_SYMBOL(__memscan_zero)
EXPORT_SYMBOL(__memscan_generic)
__memscan_zero:
/* %o0 = addr, %o1 = size */
cmp %o1, 0
......
......@@ -5,6 +5,8 @@
* Copyright (C) 1998 David S. Miller (davem@redhat.com)
*/
#include <asm/export.h>
#define HI_MAGIC 0x8080808080808080
#define LO_MAGIC 0x0101010101010101
#define ASI_PL 0x88
......@@ -13,6 +15,8 @@
.align 32
.globl __memscan_zero, __memscan_generic
.globl memscan
EXPORT_SYMBOL(__memscan_zero)
EXPORT_SYMBOL(__memscan_generic)
__memscan_zero:
/* %o0 = bufp, %o1 = size */
......
......@@ -9,6 +9,7 @@
*/
#include <asm/ptrace.h>
#include <asm/export.h>
/* Work around cpp -rob */
#define ALLOC #alloc
......@@ -63,6 +64,8 @@ __bzero_begin:
.globl __bzero
.globl memset
EXPORT_SYMBOL(__bzero)
EXPORT_SYMBOL(memset)
.globl __memset_start, __memset_end
__memset_start:
memset:
......
......@@ -17,6 +17,7 @@ along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
#include <asm/export.h>
.text
.align 4
.globl __muldi3
......@@ -74,3 +75,4 @@ __muldi3:
add %l2, %l0, %i0
ret
restore %g0, %l3, %o1
EXPORT_SYMBOL(__muldi3)
......@@ -7,6 +7,7 @@
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/export.h>
#define LO_MAGIC 0x01010101
#define HI_MAGIC 0x80808080
......@@ -78,3 +79,4 @@ ENTRY(strlen)
retl
mov 2, %o0
ENDPROC(strlen)
EXPORT_SYMBOL(strlen)
......@@ -4,6 +4,7 @@
*/
#include <linux/linkage.h>
#include <asm/export.h>
.text
ENTRY(strncmp)
......@@ -116,3 +117,4 @@ ENTRY(strncmp)
retl
sub %o3, %o0, %o0
ENDPROC(strncmp)
EXPORT_SYMBOL(strncmp)
......@@ -6,6 +6,7 @@
#include <linux/linkage.h>
#include <asm/asi.h>
#include <asm/export.h>
.text
ENTRY(strncmp)
......@@ -28,3 +29,4 @@ ENTRY(strncmp)
retl
clr %o0
ENDPROC(strncmp)
EXPORT_SYMBOL(strncmp)
......@@ -13,6 +13,7 @@
#include <asm/asi.h>
#include <asm/dcu.h>
#include <asm/spitfire.h>
#include <asm/export.h>
/*
* Requirements:
......@@ -90,6 +91,7 @@ ENTRY(xor_vis_2)
retl
wr %g0, 0, %fprs
ENDPROC(xor_vis_2)
EXPORT_SYMBOL(xor_vis_2)
ENTRY(xor_vis_3)
rd %fprs, %o5
......@@ -156,6 +158,7 @@ ENTRY(xor_vis_3)
retl
wr %g0, 0, %fprs
ENDPROC(xor_vis_3)
EXPORT_SYMBOL(xor_vis_3)
ENTRY(xor_vis_4)
rd %fprs, %o5
......@@ -241,6 +244,7 @@ ENTRY(xor_vis_4)
retl
wr %g0, 0, %fprs
ENDPROC(xor_vis_4)
EXPORT_SYMBOL(xor_vis_4)
ENTRY(xor_vis_5)
save %sp, -192, %sp
......@@ -347,6 +351,7 @@ ENTRY(xor_vis_5)
ret
restore
ENDPROC(xor_vis_5)
EXPORT_SYMBOL(xor_vis_5)
/* Niagara versions. */
ENTRY(xor_niagara_2) /* %o0=bytes, %o1=dest, %o2=src */
......@@ -393,6 +398,7 @@ ENTRY(xor_niagara_2) /* %o0=bytes, %o1=dest, %o2=src */
ret
restore
ENDPROC(xor_niagara_2)
EXPORT_SYMBOL(xor_niagara_2)
ENTRY(xor_niagara_3) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2 */
save %sp, -192, %sp
......@@ -454,6 +460,7 @@ ENTRY(xor_niagara_3) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2 */
ret
restore
ENDPROC(xor_niagara_3)
EXPORT_SYMBOL(xor_niagara_3)
ENTRY(xor_niagara_4) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */
save %sp, -192, %sp
......@@ -536,6 +543,7 @@ ENTRY(xor_niagara_4) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */
ret
restore
ENDPROC(xor_niagara_4)
EXPORT_SYMBOL(xor_niagara_4)
ENTRY(xor_niagara_5) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3, %o5=src4 */
save %sp, -192, %sp
......@@ -634,3 +642,4 @@ ENTRY(xor_niagara_5) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3, %o5=s
ret
restore
ENDPROC(xor_niagara_5)
EXPORT_SYMBOL(xor_niagara_5)
......@@ -44,6 +44,7 @@
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/export.h>
.section .entry.text, "ax"
......@@ -991,6 +992,7 @@ trace:
jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
EXPORT_SYMBOL(mcount)
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
......
......@@ -35,6 +35,7 @@
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <asm/export.h>
#include <linux/err.h>
/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
......@@ -875,6 +876,7 @@ ENTRY(native_load_gs_index)
popfq
ret
END(native_load_gs_index)
EXPORT_SYMBOL(native_load_gs_index)
_ASM_EXTABLE(.Lgs_change, bad_gs)
.section .fixup, "ax"
......
......@@ -6,6 +6,7 @@
*/
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/export.h>
/* put return address in eax (arg1) */
.macro THUNK name, func, put_ret_addr_in_eax=0
......@@ -36,5 +37,7 @@
#ifdef CONFIG_PREEMPT
THUNK ___preempt_schedule, preempt_schedule
THUNK ___preempt_schedule_notrace, preempt_schedule_notrace
EXPORT_SYMBOL(___preempt_schedule)
EXPORT_SYMBOL(___preempt_schedule_notrace)
#endif
......@@ -8,6 +8,7 @@
#include <linux/linkage.h>
#include "calling.h"
#include <asm/asm.h>
#include <asm/export.h>
/* rdi: arg1 ... normal C conventions. rax is saved/restored. */
.macro THUNK name, func, put_ret_addr_in_rdi=0
......@@ -49,6 +50,8 @@
#ifdef CONFIG_PREEMPT
THUNK ___preempt_schedule, preempt_schedule
THUNK ___preempt_schedule_notrace, preempt_schedule_notrace
EXPORT_SYMBOL(___preempt_schedule)
EXPORT_SYMBOL(___preempt_schedule_notrace)
#endif
#if defined(CONFIG_TRACE_IRQFLAGS) \
......
#ifdef CONFIG_64BIT
#define KSYM_ALIGN 16
#endif
#include <asm-generic/export.h>
......@@ -46,9 +46,7 @@ obj-$(CONFIG_MODIFY_LDT_SYSCALL) += ldt.o
obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
obj-$(CONFIG_IRQ_WORK) += irq_work.o
obj-y += probe_roms.o
obj-$(CONFIG_X86_32) += i386_ksyms_32.o
obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
obj-$(CONFIG_X86_64) += mcount_64.o
obj-$(CONFIG_X86_64) += sys_x86_64.o mcount_64.o
obj-$(CONFIG_X86_ESPFIX64) += espfix_64.o
obj-$(CONFIG_SYSFS) += ksysfs.o
obj-y += bootflag.o e820.o
......
......@@ -23,6 +23,7 @@
#include <asm/percpu.h>
#include <asm/nops.h>
#include <asm/bootparam.h>
#include <asm/export.h>
/* Physical address */
#define pa(X) ((X) - __PAGE_OFFSET)
......@@ -673,6 +674,7 @@ ENTRY(empty_zero_page)
.fill 4096,1,0
ENTRY(swapper_pg_dir)
.fill 1024,4,0
EXPORT_SYMBOL(empty_zero_page)
/*
* This starts the data section.
......
......@@ -21,6 +21,7 @@
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
#include <asm/export.h>
#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
......@@ -486,10 +487,12 @@ early_gdt_descr_base:
ENTRY(phys_base)
/* This must match the first entry in level2_kernel_pgt */
.quad 0x0000000000000000
EXPORT_SYMBOL(phys_base)
#include "../../x86/xen/xen-head.S"
__PAGE_ALIGNED_BSS
NEXT_PAGE(empty_zero_page)
.skip PAGE_SIZE
EXPORT_SYMBOL(empty_zero_page)
#include <linux/export.h>
#include <linux/spinlock_types.h>
#include <asm/checksum.h>
#include <asm/pgtable.h>
#include <asm/desc.h>
#include <asm/ftrace.h>
#ifdef CONFIG_FUNCTION_TRACER
/* mcount is defined in assembly */
EXPORT_SYMBOL(mcount);
#endif
/*
* Note: this is a prototype to get at the symbol for
* the export, but don't use it from C code; it is used
* by assembly code and does not follow the C calling convention!
*/
#ifndef CONFIG_X86_CMPXCHG64
extern void cmpxchg8b_emu(void);
EXPORT_SYMBOL(cmpxchg8b_emu);
#endif
/* Networking helper routines. */
EXPORT_SYMBOL(csum_partial_copy_generic);
EXPORT_SYMBOL(__get_user_1);
EXPORT_SYMBOL(__get_user_2);
EXPORT_SYMBOL(__get_user_4);
EXPORT_SYMBOL(__get_user_8);
EXPORT_SYMBOL(__put_user_1);
EXPORT_SYMBOL(__put_user_2);
EXPORT_SYMBOL(__put_user_4);
EXPORT_SYMBOL(__put_user_8);
EXPORT_SYMBOL(strstr);
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(empty_zero_page);
#ifdef CONFIG_PREEMPT
EXPORT_SYMBOL(___preempt_schedule);
EXPORT_SYMBOL(___preempt_schedule_notrace);
#endif
EXPORT_SYMBOL(__sw_hweight32);
......@@ -7,6 +7,7 @@
#include <linux/linkage.h>
#include <asm/ptrace.h>
#include <asm/ftrace.h>
#include <asm/export.h>
.code64
......@@ -294,6 +295,7 @@ trace:
jmp fgraph_trace
END(function_hook)
#endif /* CONFIG_DYNAMIC_FTRACE */
EXPORT_SYMBOL(function_hook)
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
......
/* Exports for assembly files.
All C exports should go in the respective C files. */
#include <linux/export.h>
#include <linux/spinlock_types.h>
#include <linux/smp.h>
#include <net/checksum.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/desc.h>
#include <asm/ftrace.h>
#ifdef CONFIG_FUNCTION_TRACER
/* mcount and __fentry__ are defined in assembly */
#ifdef CC_USING_FENTRY
EXPORT_SYMBOL(__fentry__);
#else
EXPORT_SYMBOL(mcount);
#endif
#endif
EXPORT_SYMBOL(__get_user_1);
EXPORT_SYMBOL(__get_user_2);
EXPORT_SYMBOL(__get_user_4);
EXPORT_SYMBOL(__get_user_8);
EXPORT_SYMBOL(__put_user_1);
EXPORT_SYMBOL(__put_user_2);
EXPORT_SYMBOL(__put_user_4);
EXPORT_SYMBOL(__put_user_8);
EXPORT_SYMBOL(copy_user_generic_string);
EXPORT_SYMBOL(copy_user_generic_unrolled);
EXPORT_SYMBOL(copy_user_enhanced_fast_string);
EXPORT_SYMBOL(__copy_user_nocache);
EXPORT_SYMBOL(_copy_from_user);
EXPORT_SYMBOL(_copy_to_user);
EXPORT_SYMBOL_GPL(memcpy_mcsafe_unrolled);
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(clear_page);
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(__sw_hweight32);
EXPORT_SYMBOL(__sw_hweight64);
/*
* Export string functions. We normally rely on gcc builtins for most of these,
* but gcc sometimes decides not to inline them.
*/
#undef memcpy
#undef memset
#undef memmove
extern void *__memset(void *, int, __kernel_size_t);
extern void *__memcpy(void *, const void *, __kernel_size_t);
extern void *__memmove(void *, const void *, __kernel_size_t);
extern void *memset(void *, int, __kernel_size_t);
extern void *memcpy(void *, const void *, __kernel_size_t);
extern void *memmove(void *, const void *, __kernel_size_t);
EXPORT_SYMBOL(__memset);
EXPORT_SYMBOL(__memcpy);
EXPORT_SYMBOL(__memmove);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
#ifndef CONFIG_DEBUG_VIRTUAL
EXPORT_SYMBOL(phys_base);
#endif
EXPORT_SYMBOL(empty_zero_page);
#ifndef CONFIG_PARAVIRT
EXPORT_SYMBOL(native_load_gs_index);
#endif
#ifdef CONFIG_PREEMPT
EXPORT_SYMBOL(___preempt_schedule);
EXPORT_SYMBOL(___preempt_schedule_notrace);
#endif
......@@ -28,6 +28,7 @@
#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/asm.h>
#include <asm/export.h>
/*
* computes a partial checksum, e.g. for TCP/UDP fragments
......@@ -251,6 +252,7 @@ ENTRY(csum_partial)
ENDPROC(csum_partial)
#endif
EXPORT_SYMBOL(csum_partial)
/*
unsigned int csum_partial_copy_generic (const char *src, char *dst,
......@@ -490,3 +492,4 @@ ENDPROC(csum_partial_copy_generic)
#undef ROUND1
#endif
EXPORT_SYMBOL(csum_partial_copy_generic)
#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/export.h>
/*
* Most CPUs support enhanced REP MOVSB/STOSB instructions. It is
......@@ -23,6 +24,7 @@ ENTRY(clear_page)
rep stosq
ret
ENDPROC(clear_page)
EXPORT_SYMBOL(clear_page)
ENTRY(clear_page_orig)
......
......@@ -7,6 +7,7 @@
*/
#include <linux/linkage.h>
#include <asm/export.h>
.text
......@@ -48,3 +49,4 @@ ENTRY(cmpxchg8b_emu)
ret
ENDPROC(cmpxchg8b_emu)
EXPORT_SYMBOL(cmpxchg8b_emu)
......@@ -3,6 +3,7 @@
#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/export.h>
/*
* Some CPUs run faster using the string copy instructions (sane microcode).
......@@ -17,6 +18,7 @@ ENTRY(copy_page)
rep movsq
ret
ENDPROC(copy_page)
EXPORT_SYMBOL(copy_page)
ENTRY(copy_page_regs)
subq $2*8, %rsp
......
......@@ -14,6 +14,7 @@
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/export.h>
/* Standard copy_to_user with segment limit checking */
ENTRY(_copy_to_user)
......@@ -29,6 +30,7 @@ ENTRY(_copy_to_user)
"jmp copy_user_enhanced_fast_string", \
X86_FEATURE_ERMS
ENDPROC(_copy_to_user)
EXPORT_SYMBOL(_copy_to_user)
/* Standard copy_from_user with segment limit checking */
ENTRY(_copy_from_user)
......@@ -44,6 +46,8 @@ ENTRY(_copy_from_user)
"jmp copy_user_enhanced_fast_string", \
X86_FEATURE_ERMS
ENDPROC(_copy_from_user)
EXPORT_SYMBOL(_copy_from_user)
.section .fixup,"ax"
/* must zero dest */
......@@ -155,6 +159,7 @@ ENTRY(copy_user_generic_unrolled)
_ASM_EXTABLE(21b,50b)
_ASM_EXTABLE(22b,50b)
ENDPROC(copy_user_generic_unrolled)
EXPORT_SYMBOL(copy_user_generic_unrolled)
/* Some CPUs run faster using the string copy instructions.
* This is also a lot simpler. Use them when possible.
......@@ -200,6 +205,7 @@ ENTRY(copy_user_generic_string)
_ASM_EXTABLE(1b,11b)
_ASM_EXTABLE(3b,12b)
ENDPROC(copy_user_generic_string)
EXPORT_SYMBOL(copy_user_generic_string)
/*
* Some CPUs are adding enhanced REP MOVSB/STOSB instructions.
......@@ -229,6 +235,7 @@ ENTRY(copy_user_enhanced_fast_string)
_ASM_EXTABLE(1b,12b)
ENDPROC(copy_user_enhanced_fast_string)
EXPORT_SYMBOL(copy_user_enhanced_fast_string)
/*
* copy_user_nocache - Uncached memory copy with exception handling
......@@ -379,3 +386,4 @@ ENTRY(__copy_user_nocache)
_ASM_EXTABLE(40b,.L_fixup_1b_copy)
_ASM_EXTABLE(41b,.L_fixup_1b_copy)
ENDPROC(__copy_user_nocache)
EXPORT_SYMBOL(__copy_user_nocache)
......@@ -135,6 +135,7 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
return (__force __wsum)add32_with_carry(do_csum(buff, len),
(__force u32)sum);
}
EXPORT_SYMBOL(csum_partial);
/*
* this routine is used for miscellaneous IP-like checksums, mainly
......
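To make the semantics concrete, here is a minimal user-space sketch of what a ones'-complement partial checksum computes (my own simplified code, not the kernel's optimized do_csum(); it assembles 16-bit words little-endian and skips the alignment tricks the assembly versions rely on):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static uint32_t add_with_carry(uint32_t a, uint32_t b)
{
        uint32_t s = a + b;
        return s + (s < b);             /* fold the end-around carry back in */
}

static uint32_t csum_partial_sketch(const uint8_t *buf, size_t len, uint32_t sum)
{
        while (len >= 2) {
                sum = add_with_carry(sum, (uint32_t)(buf[0] | buf[1] << 8));
                buf += 2;
                len -= 2;
        }
        if (len)                        /* odd trailing byte */
                sum = add_with_carry(sum, buf[0]);
        while (sum >> 16)               /* fold the 32-bit sum to 16 bits */
                sum = (sum & 0xffff) + (sum >> 16);
        return sum;
}

int main(void)
{
        const uint8_t hdr[] = { 0x45, 0x00, 0x00, 0x3c };
        printf("partial csum: 0x%04x\n", csum_partial_sketch(hdr, sizeof(hdr), 0));
        return 0;
}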
......@@ -32,6 +32,7 @@
#include <asm/thread_info.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/export.h>
.text
ENTRY(__get_user_1)
......@@ -44,6 +45,7 @@ ENTRY(__get_user_1)
ASM_CLAC
ret
ENDPROC(__get_user_1)
EXPORT_SYMBOL(__get_user_1)
ENTRY(__get_user_2)
add $1,%_ASM_AX
......@@ -57,6 +59,7 @@ ENTRY(__get_user_2)
ASM_CLAC
ret
ENDPROC(__get_user_2)
EXPORT_SYMBOL(__get_user_2)
ENTRY(__get_user_4)
add $3,%_ASM_AX
......@@ -70,6 +73,7 @@ ENTRY(__get_user_4)
ASM_CLAC
ret
ENDPROC(__get_user_4)
EXPORT_SYMBOL(__get_user_4)
ENTRY(__get_user_8)
#ifdef CONFIG_X86_64
......@@ -97,6 +101,7 @@ ENTRY(__get_user_8)
ret
#endif
ENDPROC(__get_user_8)
EXPORT_SYMBOL(__get_user_8)
bad_get_user:
......
#include <linux/linkage.h>
#include <asm/export.h>
#include <asm/asm.h>
......@@ -32,6 +33,7 @@ ENTRY(__sw_hweight32)
__ASM_SIZE(pop,) %__ASM_REG(dx)
ret
ENDPROC(__sw_hweight32)
EXPORT_SYMBOL(__sw_hweight32)
ENTRY(__sw_hweight64)
#ifdef CONFIG_X86_64
......@@ -77,3 +79,4 @@ ENTRY(__sw_hweight64)
ret
#endif
ENDPROC(__sw_hweight64)
EXPORT_SYMBOL(__sw_hweight64)
......@@ -4,6 +4,7 @@
#include <asm/errno.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/export.h>
/*
* We build a jump to memcpy_orig by default which gets NOPped out on
......@@ -40,6 +41,8 @@ ENTRY(memcpy)
ret
ENDPROC(memcpy)
ENDPROC(__memcpy)
EXPORT_SYMBOL(memcpy)
EXPORT_SYMBOL(__memcpy)
/*
* memcpy_erms() - enhanced fast string memcpy. This is faster and
......@@ -274,6 +277,7 @@ ENTRY(memcpy_mcsafe_unrolled)
xorq %rax, %rax
ret
ENDPROC(memcpy_mcsafe_unrolled)
EXPORT_SYMBOL_GPL(memcpy_mcsafe_unrolled)
.section .fixup, "ax"
/* Return -EFAULT for any failure */
......
......@@ -8,6 +8,7 @@
#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/export.h>
#undef memmove
......@@ -207,3 +208,5 @@ ENTRY(__memmove)
retq
ENDPROC(__memmove)
ENDPROC(memmove)
EXPORT_SYMBOL(__memmove)
EXPORT_SYMBOL(memmove)
......@@ -3,6 +3,7 @@
#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/export.h>
.weak memset
......@@ -43,6 +44,8 @@ ENTRY(__memset)
ret
ENDPROC(memset)
ENDPROC(__memset)
EXPORT_SYMBOL(memset)
EXPORT_SYMBOL(__memset)
/*
* ISO C memset - set a memory block to a byte value. This function uses
......
......@@ -15,6 +15,7 @@
#include <asm/errno.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/export.h>
/*
......@@ -43,6 +44,7 @@ ENTRY(__put_user_1)
xor %eax,%eax
EXIT
ENDPROC(__put_user_1)
EXPORT_SYMBOL(__put_user_1)
ENTRY(__put_user_2)
ENTER
......@@ -55,6 +57,7 @@ ENTRY(__put_user_2)
xor %eax,%eax
EXIT
ENDPROC(__put_user_2)
EXPORT_SYMBOL(__put_user_2)
ENTRY(__put_user_4)
ENTER
......@@ -67,6 +70,7 @@ ENTRY(__put_user_4)
xor %eax,%eax
EXIT
ENDPROC(__put_user_4)
EXPORT_SYMBOL(__put_user_4)
ENTRY(__put_user_8)
ENTER
......@@ -82,6 +86,7 @@ ENTRY(__put_user_8)
xor %eax,%eax
EXIT
ENDPROC(__put_user_8)
EXPORT_SYMBOL(__put_user_8)
bad_put_user:
movl $-EFAULT,%eax
......
#include <linux/string.h>
#include <linux/export.h>
char *strstr(const char *cs, const char *ct)
{
......@@ -28,4 +29,4 @@ __asm__ __volatile__(
: "dx", "di");
return __res;
}
EXPORT_SYMBOL(strstr);
......@@ -8,7 +8,7 @@ else
BITS := 64
endif
obj-y = bug.o bugs_$(BITS).o delay.o fault.o ksyms.o ldt.o \
obj-y = bug.o bugs_$(BITS).o delay.o fault.o ldt.o \
ptrace_$(BITS).o ptrace_user.o setjmp_$(BITS).o signal.o \
stub_$(BITS).o stub_segv.o \
sys_call_table_$(BITS).o sysrq_$(BITS).o tls_$(BITS).o \
......
......@@ -27,6 +27,7 @@
#include <asm/errno.h>
#include <asm/asm.h>
#include <asm/export.h>
/*
* computes a partial checksum, e.g. for TCP/UDP fragments
......@@ -214,3 +215,4 @@ csum_partial:
ret
#endif
EXPORT_SYMBOL(csum_partial)
#include <linux/module.h>
#include <asm/string.h>
#include <asm/checksum.h>
#ifndef CONFIG_X86_32
/*XXX: we need them because they would be exported by x86_64 */
#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4
EXPORT_SYMBOL(memcpy);
#else
EXPORT_SYMBOL(__memcpy);
#endif
#endif
EXPORT_SYMBOL(csum_partial);
#ifndef __ASM_GENERIC_EXPORT_H
#define __ASM_GENERIC_EXPORT_H
#ifndef KSYM_FUNC
#define KSYM_FUNC(x) x
#endif
#ifdef CONFIG_64BIT
#define __put .quad
#ifndef KSYM_ALIGN
#define KSYM_ALIGN 8
#endif
#ifndef KCRC_ALIGN
#define KCRC_ALIGN 8
#endif
#else
#define __put .long
#ifndef KSYM_ALIGN
#define KSYM_ALIGN 4
#endif
#ifndef KCRC_ALIGN
#define KCRC_ALIGN 4
#endif
#endif
#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
#define KSYM(name) _##name
#else
#define KSYM(name) name
#endif
/*
* note on .section use: @progbits vs %progbits nastiness doesn't matter,
* since we immediately emit into those sections anyway.
*/
.macro ___EXPORT_SYMBOL name,val,sec
#ifdef CONFIG_MODULES
.globl KSYM(__ksymtab_\name)
.section ___ksymtab\sec+\name,"a"
.balign KSYM_ALIGN
KSYM(__ksymtab_\name):
__put \val, KSYM(__kstrtab_\name)
.previous
.section __ksymtab_strings,"a"
KSYM(__kstrtab_\name):
#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
.asciz "_\name"
#else
.asciz "\name"
#endif
.previous
#ifdef CONFIG_MODVERSIONS
.section ___kcrctab\sec+\name,"a"
.balign KCRC_ALIGN
KSYM(__kcrctab_\name):
__put KSYM(__crc_\name)
.weak KSYM(__crc_\name)
.previous
#endif
#endif
.endm
#undef __put
#if defined(__KSYM_DEPS__)
#define __EXPORT_SYMBOL(sym, val, sec) === __KSYM_##sym ===
#elif defined(CONFIG_TRIM_UNUSED_KSYMS)
#include <linux/kconfig.h>
#include <generated/autoksyms.h>
#define __EXPORT_SYMBOL(sym, val, sec) \
__cond_export_sym(sym, val, sec, config_enabled(__KSYM_##sym))
#define __cond_export_sym(sym, val, sec, conf) \
___cond_export_sym(sym, val, sec, conf)
#define ___cond_export_sym(sym, val, sec, enabled) \
__cond_export_sym_##enabled(sym, val, sec)
#define __cond_export_sym_1(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec
#define __cond_export_sym_0(sym, val, sec) /* nothing */
#else
#define __EXPORT_SYMBOL(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec
#endif
#define EXPORT_SYMBOL(name) \
__EXPORT_SYMBOL(name, KSYM_FUNC(KSYM(name)),)
#define EXPORT_SYMBOL_GPL(name) \
__EXPORT_SYMBOL(name, KSYM_FUNC(KSYM(name)), _gpl)
#define EXPORT_DATA_SYMBOL(name) \
__EXPORT_SYMBOL(name, KSYM(name),)
#define EXPORT_DATA_SYMBOL_GPL(name) \
__EXPORT_SYMBOL(name, KSYM(name),_gpl)
#endif
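The CONFIG_TRIM_UNUSED_KSYMS branch above depends on a two-step expansion: the extra ___cond_export_sym() level forces config_enabled(__KSYM_sym) to be evaluated to 0 or 1 before token pasting selects __cond_export_sym_1 or __cond_export_sym_0. A standalone sketch of that dispatch pattern (macro names are mine, not the kernel's):

#include <stdio.h>

#define COND_EXPORT(sym, conf)          COND_EXPORT_(sym, conf)         /* expand conf first */
#define COND_EXPORT_(sym, enabled)      COND_EXPORT_##enabled(sym)      /* then paste 0/1 */
#define COND_EXPORT_1(sym)              printf("export %s\n", #sym);
#define COND_EXPORT_0(sym)              /* expands to nothing */

int main(void)
{
        COND_EXPORT(strlen, 1)          /* emitted */
        COND_EXPORT(memset, 0)          /* dropped */
        return 0;
}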
......@@ -196,9 +196,14 @@
*(.dtb.init.rodata) \
VMLINUX_SYMBOL(__dtb_end) = .;
/* .data section */
/*
* .data section
* The LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which
* generates .data.identifier sections that need to be pulled in with .data,
* but we don't want to pull in .data..stuff, which has its own requirements.
* Same for bss.
*/
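As an aside, a compilable sketch of what those compiler flags emit (the section names are what GCC typically generates; none of this is part of the patch):

/* sections.c -- build with: gcc -c -ffunction-sections -fdata-sections sections.c
 * then inspect with: objdump -h sections.o */
static int counter = 42;                /* expected in .data.counter */

int helper(void)                        /* expected in .text.helper */
{
        return counter++;
}

The .data.[0-9a-zA-Z_]* pattern below folds such per-object sections back into .data, while double-dot sections like .data..percpu stay out.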
#define DATA_DATA \
*(.data) \
*(.data .data.[0-9a-zA-Z_]*) \
*(.ref.data) \
*(.data..shared_aligned) /* percpu related */ \
MEM_KEEP(init.data) \
......@@ -320,76 +325,76 @@
/* Kernel symbol table: Normal symbols */ \
__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab) = .; \
*(SORT(___ksymtab+*)) \
KEEP(*(SORT(___ksymtab+*))) \
VMLINUX_SYMBOL(__stop___ksymtab) = .; \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
*(SORT(___ksymtab_gpl+*)) \
KEEP(*(SORT(___ksymtab_gpl+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
} \
\
/* Kernel symbol table: Normal unused symbols */ \
__ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
*(SORT(___ksymtab_unused+*)) \
KEEP(*(SORT(___ksymtab_unused+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
} \
\
/* Kernel symbol table: GPL-only unused symbols */ \
__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
*(SORT(___ksymtab_unused_gpl+*)) \
KEEP(*(SORT(___ksymtab_unused_gpl+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
} \
\
/* Kernel symbol table: GPL-future-only symbols */ \
__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
*(SORT(___ksymtab_gpl_future+*)) \
KEEP(*(SORT(___ksymtab_gpl_future+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
} \
\
/* Kernel symbol table: Normal symbols */ \
__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab) = .; \
*(SORT(___kcrctab+*)) \
KEEP(*(SORT(___kcrctab+*))) \
VMLINUX_SYMBOL(__stop___kcrctab) = .; \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
*(SORT(___kcrctab_gpl+*)) \
KEEP(*(SORT(___kcrctab_gpl+*))) \
VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
} \
\
/* Kernel symbol table: Normal unused symbols */ \
__kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \
*(SORT(___kcrctab_unused+*)) \
KEEP(*(SORT(___kcrctab_unused+*))) \
VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \
} \
\
/* Kernel symbol table: GPL-only unused symbols */ \
__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
*(SORT(___kcrctab_unused_gpl+*)) \
KEEP(*(SORT(___kcrctab_unused_gpl+*))) \
VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
} \
\
/* Kernel symbol table: GPL-future-only symbols */ \
__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
*(SORT(___kcrctab_gpl_future+*)) \
KEEP(*(SORT(___kcrctab_gpl_future+*))) \
VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
} \
\
/* Kernel symbol table: strings */ \
__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
*(__ksymtab_strings) \
KEEP(*(__ksymtab_strings)) \
} \
\
/* __*init sections */ \
......@@ -424,12 +429,17 @@
#define SECURITY_INIT \
.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__security_initcall_start) = .; \
*(.security_initcall.init) \
KEEP(*(.security_initcall.init)) \
VMLINUX_SYMBOL(__security_initcall_end) = .; \
}
/* .text section. Map to function alignment to avoid address changes
* during second ld run in second ld pass when generating System.map */
* during the second ld pass when generating System.map.
* The LD_DEAD_CODE_DATA_ELIMINATION option enables -ffunction-sections, which
* generates .text.identifier sections that need to be pulled in with .text,
* but some architectures define .text.foo sections that are not intended to
* be pulled in here. Those enabling LD_DEAD_CODE_DATA_ELIMINATION must
* ensure they don't have conflicting section names, and must pull in
* .text.[0-9a-zA-Z_]* */
#define TEXT_TEXT \
ALIGN_FUNCTION(); \
*(.text.hot .text .text.fixup .text.unlikely) \
......@@ -533,6 +543,7 @@
/* init and exit section handling */
#define INIT_DATA \
KEEP(*(SORT(___kentry+*))) \
*(.init.data) \
MEM_DISCARD(init.data) \
KERNEL_CTORS() \
......@@ -599,7 +610,7 @@
BSS_FIRST_SECTIONS \
*(.bss..page_aligned) \
*(.dynbss) \
*(.bss) \
*(.bss .bss.[0-9a-zA-Z_]*) \
*(COMMON) \
}
......@@ -682,12 +693,12 @@
#define INIT_CALLS_LEVEL(level) \
VMLINUX_SYMBOL(__initcall##level##_start) = .; \
*(.initcall##level##.init) \
*(.initcall##level##s.init) \
KEEP(*(.initcall##level##.init)) \
KEEP(*(.initcall##level##s.init)) \
#define INIT_CALLS \
VMLINUX_SYMBOL(__initcall_start) = .; \
*(.initcallearly.init) \
KEEP(*(.initcallearly.init)) \
INIT_CALLS_LEVEL(0) \
INIT_CALLS_LEVEL(1) \
INIT_CALLS_LEVEL(2) \
......@@ -701,21 +712,21 @@
#define CON_INITCALL \
VMLINUX_SYMBOL(__con_initcall_start) = .; \
*(.con_initcall.init) \
KEEP(*(.con_initcall.init)) \
VMLINUX_SYMBOL(__con_initcall_end) = .;
#define SECURITY_INITCALL \
VMLINUX_SYMBOL(__security_initcall_start) = .; \
*(.security_initcall.init) \
KEEP(*(.security_initcall.init)) \
VMLINUX_SYMBOL(__security_initcall_end) = .;
#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
. = ALIGN(4); \
VMLINUX_SYMBOL(__initramfs_start) = .; \
*(.init.ramfs) \
KEEP(*(.init.ramfs)) \
. = ALIGN(8); \
*(.init.ramfs.info)
KEEP(*(.init.ramfs.info))
#else
#define INIT_RAM_FS
#endif
......
......@@ -182,6 +182,29 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
# define unreachable() do { } while (1)
#endif
/*
* KENTRY - kernel entry point
* This can be used to annotate symbols (functions or data) that are used
* without their linker symbol being referenced explicitly. For example,
* interrupt vector handlers, or functions in the kernel image that are found
* programmatically.
*
* Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
* are handled in their own way (with KEEP() in linker scripts).
*
* KENTRY can be avoided if the symbols in question are marked as KEEP() in the
* linker script. For example, an architecture could KEEP() its entire
* boot/exception vector code rather than annotate each function and data object.
*/
#ifndef KENTRY
# define KENTRY(sym) \
extern typeof(sym) sym; \
static const unsigned long __kentry_##sym \
__used \
__attribute__((section("___kentry" "+" #sym ), used)) \
= (unsigned long)&sym;
#endif
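A hypothetical usage sketch (the symbol name is illustrative, not from this patch): keeping an entry point alive under --gc-sections even though no C code references it by name:

extern void example_vector_entry(void);        /* defined in assembly somewhere */
KENTRY(example_vector_entry)                   /* emits __kentry_example_vector_entry */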
#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off) \
({ unsigned long __ptr; \
......
#ifndef _LINUX_EXPORT_H
#define _LINUX_EXPORT_H
/*
* Export symbols from the kernel to modules. Forked from module.h
* to reduce the amount of pointless cruft we feed to gcc when only
......@@ -42,27 +43,26 @@ extern struct module __this_module;
#ifdef CONFIG_MODVERSIONS
/* Mark the CRC weak since genksyms apparently decides not to
* generate a checksum for some symbols */
#define __CRC_SYMBOL(sym, sec) \
extern __visible void *__crc_##sym __attribute__((weak)); \
static const unsigned long __kcrctab_##sym \
__used \
__attribute__((section("___kcrctab" sec "+" #sym), unused)) \
#define __CRC_SYMBOL(sym, sec) \
extern __visible void *__crc_##sym __attribute__((weak)); \
static const unsigned long __kcrctab_##sym \
__used \
__attribute__((section("___kcrctab" sec "+" #sym), used)) \
= (unsigned long) &__crc_##sym;
#else
#define __CRC_SYMBOL(sym, sec)
#endif
/* For every exported symbol, place a struct in the __ksymtab section */
#define ___EXPORT_SYMBOL(sym, sec) \
extern typeof(sym) sym; \
__CRC_SYMBOL(sym, sec) \
static const char __kstrtab_##sym[] \
__attribute__((section("__ksymtab_strings"), aligned(1))) \
= VMLINUX_SYMBOL_STR(sym); \
extern const struct kernel_symbol __ksymtab_##sym; \
__visible const struct kernel_symbol __ksymtab_##sym \
__used \
__attribute__((section("___ksymtab" sec "+" #sym), unused)) \
#define ___EXPORT_SYMBOL(sym, sec) \
extern typeof(sym) sym; \
__CRC_SYMBOL(sym, sec) \
static const char __kstrtab_##sym[] \
__attribute__((section("__ksymtab_strings"), aligned(1))) \
= VMLINUX_SYMBOL_STR(sym); \
static const struct kernel_symbol __ksymtab_##sym \
__used \
__attribute__((section("___ksymtab" sec "+" #sym), used)) \
= { (unsigned long)&sym, __kstrtab_##sym }
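For reference, exporting from C then looks like this (the helper name is mine); the resulting struct kernel_symbol lands in section "___ksymtab+example_add", which the SORT(___ksymtab+*) input pattern in the linker script gathers into __ksymtab:

#include <linux/export.h>

int example_add(int a, int b)
{
        return a + b;
}
EXPORT_SYMBOL(example_add);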
#if defined(__KSYM_DEPS__)
......
......@@ -139,24 +139,8 @@ extern bool initcall_debug;
#ifndef __ASSEMBLY__
#ifdef CONFIG_LTO
/* Work around a LTO gcc problem: when there is no reference to a variable
* in a module it will be moved to the end of the program. This causes
* reordering of initcalls which the kernel does not like.
* Add a dummy reference function to avoid this. The function is
* deleted by the linker.
*/
#define LTO_REFERENCE_INITCALL(x) \
; /* yes this is needed */ \
static __used __exit void *reference_##x(void) \
{ \
return &x; \
}
#else
#define LTO_REFERENCE_INITCALL(x)
#endif
/* initcalls are now grouped by functionality into separate
/*
* initcalls are now grouped by functionality into separate
* subsections. Ordering inside the subsections is determined
* by link order.
* For backwards compatibility, initcall() puts the call in
......@@ -164,12 +148,16 @@ extern bool initcall_debug;
*
* The `id' arg to __define_initcall() is needed so that multiple initcalls
* can point at the same handler without causing duplicate-symbol build errors.
*
* Initcalls are run by placing pointers in initcall sections that the
* kernel iterates at runtime. The linker can do dead code / data elimination
* and would remove them completely, so the initcall sections have to be marked
* as KEEP() in the linker script.
*/
#define __define_initcall(fn, id) \
static initcall_t __initcall_##fn##id __used \
__attribute__((__section__(".initcall" #id ".init"))) = fn; \
LTO_REFERENCE_INITCALL(__initcall_##fn##id)
__attribute__((__section__(".initcall" #id ".init"))) = fn;
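A minimal initcall for illustration (the function name is mine): device_initcall() places a pointer to the function in .initcall6.init, one of the sections the vmlinux.lds.h change earlier in this series now wraps in KEEP(), so --gc-sections cannot discard it:

#include <linux/init.h>

static int __init example_setup(void)
{
        return 0;                       /* nothing to do; just a placeholder body */
}
device_initcall(example_setup);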
/*
* Early initcalls run before initializing SMP.
......@@ -205,15 +193,15 @@ extern bool initcall_debug;
#define __initcall(fn) device_initcall(fn)
#define __exitcall(fn) \
static exitcall_t __exitcall_##fn __exit_call = fn
#define console_initcall(fn) \
static initcall_t __initcall_##fn \
__used __section(.con_initcall.init) = fn
#define security_initcall(fn) \
static initcall_t __initcall_##fn \
__used __section(.security_initcall.init) = fn
struct obs_kernel_param {
......
......@@ -2,6 +2,8 @@
# Makefile for the linux kernel.
#
ccflags-y := -fno-function-sections -fno-data-sections
obj-y := main.o version.o mounts.o
ifneq ($(CONFIG_BLK_DEV_INITRD),y)
obj-y += noinitramfs.o
......
......@@ -81,6 +81,7 @@ endif
ifneq ($(strip $(lib-y) $(lib-m) $(lib-)),)
lib-target := $(obj)/lib.a
obj-y += $(obj)/lib-ksyms.o
endif
ifneq ($(strip $(obj-y) $(obj-m) $(obj-) $(subdir-m) $(lib-target)),)
......@@ -358,12 +359,22 @@ $(sort $(subdir-obj-y)): $(subdir-ym) ;
# Rule to compile a set of .o files into one .o file
#
ifdef builtin-target
quiet_cmd_link_o_target = LD $@
ifdef CONFIG_THIN_ARCHIVES
cmd_make_builtin = rm -f $@; $(AR) rcST$(KBUILD_ARFLAGS)
cmd_make_empty_builtin = rm -f $@; $(AR) rcST$(KBUILD_ARFLAGS)
quiet_cmd_link_o_target = AR $@
else
cmd_make_builtin = $(LD) $(ld_flags) -r -o
cmd_make_empty_builtin = rm -f $@; $(AR) rcs$(KBUILD_ARFLAGS)
quiet_cmd_link_o_target = LD $@
endif
# If the list of objects to link is empty, just create an empty built-in.o
cmd_link_o_target = $(if $(strip $(obj-y)),\
$(LD) $(ld_flags) -r -o $@ $(filter $(obj-y), $^) \
$(cmd_make_builtin) $@ $(filter $(obj-y), $^) \
$(cmd_secanalysis),\
rm -f $@; $(AR) rcs$(KBUILD_ARFLAGS) $@)
$(cmd_make_empty_builtin) $@)
$(builtin-target): $(obj-y) FORCE
$(call if_changed,link_o_target)
......@@ -389,12 +400,36 @@ $(modorder-target): $(subdir-ym) FORCE
#
ifdef lib-target
quiet_cmd_link_l_target = AR $@
cmd_link_l_target = rm -f $@; $(AR) rcs$(KBUILD_ARFLAGS) $@ $(lib-y)
ifdef CONFIG_THIN_ARCHIVES
cmd_link_l_target = rm -f $@; $(AR) rcsT$(KBUILD_ARFLAGS) $@ $(lib-y)
else
cmd_link_l_target = rm -f $@; $(AR) rcs$(KBUILD_ARFLAGS) $@ $(lib-y)
endif
$(lib-target): $(lib-y) FORCE
$(call if_changed,link_l_target)
targets += $(lib-target)
dummy-object = $(obj)/.lib_exports.o
ksyms-lds = $(dot-target).lds
ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
ref_prefix = EXTERN(_
else
ref_prefix = EXTERN(
endif
quiet_cmd_export_list = EXPORTS $@
cmd_export_list = $(OBJDUMP) -h $< | \
sed -ne '/___ksymtab/{s/.*+/$(ref_prefix)/;s/ .*/)/;p}' >$(ksyms-lds);\
rm -f $(dummy-object);\
$(AR) rcs$(KBUILD_ARFLAGS) $(dummy-object);\
$(LD) $(ld_flags) -r -o $@ -T $(ksyms-lds) $(dummy-object);\
rm $(dummy-object) $(ksyms-lds)
$(obj)/lib-ksyms.o: $(lib-target) FORCE
$(call if_changed,export_list)
endif
#
......
......@@ -115,14 +115,18 @@ $(modules:.ko=.mod.o): %.mod.o: %.mod.c FORCE
targets += $(modules:.ko=.mod.o)
# Step 6), final link of the modules
ARCH_POSTLINK := $(wildcard $(srctree)/arch/$(SRCARCH)/Makefile.postlink)
# Step 6), final link of the modules with optional arch pass after final link
quiet_cmd_ld_ko_o = LD [M] $@
cmd_ld_ko_o = $(LD) -r $(LDFLAGS) \
$(KBUILD_LDFLAGS_MODULE) $(LDFLAGS_MODULE) \
-o $@ $(filter-out FORCE,$^)
cmd_ld_ko_o = \
$(LD) -r $(LDFLAGS) \
$(KBUILD_LDFLAGS_MODULE) $(LDFLAGS_MODULE) \
-o $@ $(filter-out FORCE,$^) ; \
$(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) $@, true)
$(modules): %.ko :%.o %.mod.o FORCE
$(call if_changed,ld_ko_o)
+$(call if_changed,ld_ko_o)
targets += $(modules)
......
......@@ -82,8 +82,7 @@
* to date before even starting the recursive build, so it's too late
* at this point anyway.
*
* The algorithm to grep for "CONFIG_..." is bit unusual, but should
* be fast ;-) We don't even try to really parse the header files, but
* We don't even try to really parse the header files, but
* merely grep, i.e. if CONFIG_FOO is mentioned in a comment, it will
* be picked up as well. It's not a problem with respect to
* correctness, since that can only give too many dependencies, thus
......@@ -115,11 +114,6 @@
#include <ctype.h>
#include <arpa/inet.h>
#define INT_CONF ntohl(0x434f4e46)
#define INT_ONFI ntohl(0x4f4e4649)
#define INT_NFIG ntohl(0x4e464947)
#define INT_FIG_ ntohl(0x4649475f)
int insert_extra_deps;
char *target;
char *depfile;
......@@ -241,37 +235,22 @@ static void use_config(const char *m, int slen)
print_config(m, slen);
}
static void parse_config_file(const char *map, size_t len)
static void parse_config_file(const char *p)
{
const int *end = (const int *) (map + len);
/* start at +1, so that p can never be < map */
const int *m = (const int *) map + 1;
const char *p, *q;
for (; m < end; m++) {
if (*m == INT_CONF) { p = (char *) m ; goto conf; }
if (*m == INT_ONFI) { p = (char *) m-1; goto conf; }
if (*m == INT_NFIG) { p = (char *) m-2; goto conf; }
if (*m == INT_FIG_) { p = (char *) m-3; goto conf; }
continue;
conf:
if (p > map + len - 7)
continue;
if (memcmp(p, "CONFIG_", 7))
continue;
const char *q, *r;
while ((p = strstr(p, "CONFIG_"))) {
p += 7;
for (q = p; q < map + len; q++) {
if (!(isalnum(*q) || *q == '_'))
goto found;
}
continue;
found:
if (!memcmp(q - 7, "_MODULE", 7))
q -= 7;
if (q - p < 0)
continue;
use_config(p, q - p);
q = p;
while (*q && (isalnum(*q) || *q == '_'))
q++;
if (memcmp(q - 7, "_MODULE", 7) == 0)
r = q - 7;
else
r = q;
if (r > p)
use_config(p, r - p);
p = q;
}
}
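A standalone sketch of the new scan, simplified so it can be compiled and run on its own (it prints each token instead of calling use_config(), and the input string is made up):

#include <ctype.h>
#include <stdio.h>
#include <string.h>

static void scan_config(const char *p)
{
        const char *q, *r;

        while ((p = strstr(p, "CONFIG_"))) {
                p += 7;                 /* skip the prefix, as fixdep does */
                q = p;
                while (*q && (isalnum((unsigned char)*q) || *q == '_'))
                        q++;
                /* strip a _MODULE suffix so CONFIG_FOO_MODULE counts as CONFIG_FOO */
                r = (q - p >= 7 && memcmp(q - 7, "_MODULE", 7) == 0) ? q - 7 : q;
                if (r > p)
                        printf("CONFIG_%.*s\n", (int)(r - p), p);
                p = q;
        }
}

int main(void)
{
        scan_config("#ifdef CONFIG_SMP\n#if IS_ENABLED(CONFIG_FOO_MODULE)\n");
        return 0;
}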
......@@ -291,7 +270,7 @@ static void do_config_file(const char *filename)
{
struct stat st;
int fd;
void *map;
char *map;
fd = open(filename, O_RDONLY);
if (fd < 0) {
......@@ -308,18 +287,23 @@ static void do_config_file(const char *filename)
close(fd);
return;
}
map = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
if ((long) map == -1) {
perror("fixdep: mmap");
map = malloc(st.st_size + 1);
if (!map) {
perror("fixdep: malloc");
close(fd);
return;
}
if (read(fd, map, st.st_size) != st.st_size) {
perror("fixdep: read");
close(fd);
return;
}
map[st.st_size] = '\0';
close(fd);
parse_config_file(map, st.st_size);
munmap(map, st.st_size);
parse_config_file(map);
close(fd);
free(map);
}
/*
......@@ -446,22 +430,8 @@ static void print_deps(void)
close(fd);
}
static void traps(void)
{
static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
int *p = (int *)test;
if (*p != INT_CONF) {
fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianness? %#x\n",
*p);
exit(2);
}
}
int main(int argc, char *argv[])
{
traps();
if (argc == 5 && !strcmp(argv[1], "-e")) {
insert_extra_deps = 1;
argv++;
......
......@@ -97,7 +97,10 @@ print_mtime() {
}
list_parse() {
[ ! -L "$1" ] && echo "$1 \\" || :
if [ -L "$1" ]; then
return
fi
echo "$1" | sed 's/:/\\:/g; s/$/ \\/'
}
# for each file print a line in following format
......
......@@ -289,6 +289,23 @@ repeat:
}
break;
case ST_TYPEOF_1:
if (token == IDENT)
{
if (is_reserved_word(yytext, yyleng)
|| find_symbol(yytext, SYM_TYPEDEF, 1))
{
yyless(0);
unput('(');
lexstate = ST_NORMAL;
token = TYPEOF_KEYW;
break;
}
_APP("(", 1);
}
lexstate = ST_TYPEOF;
/* FALLTHRU */
case ST_TYPEOF:
switch (token)
{
......@@ -313,24 +330,6 @@ repeat:
}
break;
case ST_TYPEOF_1:
if (token == IDENT)
{
if (is_reserved_word(yytext, yyleng)
|| find_symbol(yytext, SYM_TYPEDEF, 1))
{
yyless(0);
unput('(');
lexstate = ST_NORMAL;
token = TYPEOF_KEYW;
break;
}
_APP("(", 1);
}
APP;
lexstate = ST_TYPEOF;
goto repeat;
case ST_BRACKET:
APP;
switch (token)
......
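For context, the constructs this state machine must disambiguate, written as plain GNU C (names arbitrary): after "typeof(", an identifier is treated as an expression unless it is a reserved word or a known typedef, in which case a type name follows:

/* typeof_demo.c -- builds with: gcc -c typeof_demo.c */
typedef unsigned long ulong_t;

int x;
typeof(x) y;            /* identifier: the expression form        */
typeof(ulong_t) a;      /* known typedef: the type-name form      */
typeof(int *) p;        /* reserved word: also the type-name form */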
......@@ -2098,6 +2098,23 @@ repeat:
}
break;
case ST_TYPEOF_1:
if (token == IDENT)
{
if (is_reserved_word(yytext, yyleng)
|| find_symbol(yytext, SYM_TYPEDEF, 1))
{
yyless(0);
unput('(');
lexstate = ST_NORMAL;
token = TYPEOF_KEYW;
break;
}
_APP("(", 1);
}
lexstate = ST_TYPEOF;
/* FALLTHRU */
case ST_TYPEOF:
switch (token)
{
......@@ -2122,24 +2139,6 @@ repeat:
}
break;
case ST_TYPEOF_1:
if (token == IDENT)
{
if (is_reserved_word(yytext, yyleng)
|| find_symbol(yytext, SYM_TYPEDEF, 1))
{
yyless(0);
unput('(');
lexstate = ST_NORMAL;
token = TYPEOF_KEYW;
break;
}
_APP("(", 1);
}
APP;
lexstate = ST_TYPEOF;
goto repeat;
case ST_BRACKET:
APP;
switch (token)
......
......@@ -37,12 +37,40 @@ info()
fi
}
# A thin archive build here makes a final archive with a
# symbol table and indexes from the vmlinux objects, which can be
# used as input to the linker.
#
# The traditional incremental style of link does not require this step.
#
# built-in.o output file
#
archive_builtin()
{
if [ -n "${CONFIG_THIN_ARCHIVES}" ]; then
info AR built-in.o
rm -f built-in.o;
${AR} rcsT${KBUILD_ARFLAGS} built-in.o \
${KBUILD_VMLINUX_INIT} \
${KBUILD_VMLINUX_MAIN}
fi
}
# Link of vmlinux.o used for section mismatch analysis
# ${1} output file
modpost_link()
{
${LD} ${LDFLAGS} -r -o ${1} ${KBUILD_VMLINUX_INIT} \
--start-group ${KBUILD_VMLINUX_MAIN} --end-group
local objects
if [ -n "${CONFIG_THIN_ARCHIVES}" ]; then
objects="--whole-archive built-in.o"
else
objects="${KBUILD_VMLINUX_INIT} \
--start-group \
${KBUILD_VMLINUX_MAIN} \
--end-group"
fi
${LD} ${LDFLAGS} -r -o ${1} ${objects}
}
# Link of vmlinux
......@@ -51,18 +79,36 @@ modpost_link()
vmlinux_link()
{
local lds="${objtree}/${KBUILD_LDS}"
local objects
if [ "${SRCARCH}" != "um" ]; then
${LD} ${LDFLAGS} ${LDFLAGS_vmlinux} -o ${2} \
-T ${lds} ${KBUILD_VMLINUX_INIT} \
--start-group ${KBUILD_VMLINUX_MAIN} --end-group ${1}
if [ -n "${CONFIG_THIN_ARCHIVES}" ]; then
objects="--whole-archive built-in.o ${1}"
else
objects="${KBUILD_VMLINUX_INIT} \
--start-group \
${KBUILD_VMLINUX_MAIN} \
--end-group \
${1}"
fi
${LD} ${LDFLAGS} ${LDFLAGS_vmlinux} -o ${2} \
-T ${lds} ${objects}
else
${CC} ${CFLAGS_vmlinux} -o ${2} \
-Wl,-T,${lds} ${KBUILD_VMLINUX_INIT} \
-Wl,--start-group \
${KBUILD_VMLINUX_MAIN} \
-Wl,--end-group \
-lutil -lrt -lpthread ${1}
if [ -n "${CONFIG_THIN_ARCHIVES}" ]; then
objects="-Wl,--whole-archive built-in.o ${1}"
else
objects="${KBUILD_VMLINUX_INIT} \
-Wl,--start-group \
${KBUILD_VMLINUX_MAIN} \
-Wl,--end-group \
${1}"
fi
${CC} ${CFLAGS_vmlinux} -o ${2} \
-Wl,-T,${lds} \
${objects} \
-lutil -lrt -lpthread
rm -f linux
fi
}
......@@ -119,6 +165,7 @@ cleanup()
rm -f .tmp_kallsyms*
rm -f .tmp_version
rm -f .tmp_vmlinux*
rm -f built-in.o
rm -f System.map
rm -f vmlinux
rm -f vmlinux.o
......@@ -162,6 +209,8 @@ case "${KCONFIG_CONFIG}" in
. "./${KCONFIG_CONFIG}"
esac
archive_builtin
# link vmlinux.o
info LD vmlinux.o
modpost_link vmlinux.o
......