Commit 29a011fc authored by Sathvika Vasireddy, committed by Michael Ellerman

powerpc: Fix objtool unannotated intra-function call warnings

Objtool throws unannotated intra-function call warnings in the following
assembly files:

arch/powerpc/kernel/vector.o: warning: objtool: .text+0x53c: unannotated intra-function call

arch/powerpc/kvm/book3s_hv_rmhandlers.o: warning: objtool: .text+0x60: unannotated intra-function call
arch/powerpc/kvm/book3s_hv_rmhandlers.o: warning: objtool: .text+0x124: unannotated intra-function call
arch/powerpc/kvm/book3s_hv_rmhandlers.o: warning: objtool: .text+0x5d4: unannotated intra-function call
arch/powerpc/kvm/book3s_hv_rmhandlers.o: warning: objtool: .text+0x5dc: unannotated intra-function call
arch/powerpc/kvm/book3s_hv_rmhandlers.o: warning: objtool: .text+0xcb8: unannotated intra-function call
arch/powerpc/kvm/book3s_hv_rmhandlers.o: warning: objtool: .text+0xd0c: unannotated intra-function call
arch/powerpc/kvm/book3s_hv_rmhandlers.o: warning: objtool: .text+0x1030: unannotated intra-function call

arch/powerpc/kernel/head_64.o: warning: objtool: .text+0x358: unannotated intra-function call
arch/powerpc/kernel/head_64.o: warning: objtool: .text+0x728: unannotated intra-function call
arch/powerpc/kernel/head_64.o: warning: objtool: .text+0x4d94: unannotated intra-function call
arch/powerpc/kernel/head_64.o: warning: objtool: .text+0x4ec4: unannotated intra-function call

arch/powerpc/kvm/book3s_hv_interrupts.o: warning: objtool: .text+0x6c: unannotated intra-function call
arch/powerpc/kernel/misc_64.o: warning: objtool: .text+0x64: unannotated intra-function call

Objtool does not add STT_NOTYPE symbols with size 0 to the rbtree, which
is why the find_call_destination() function cannot find the destination
symbol for a 'bl' instruction. For such symbols, objtool throws
unannotated intra-function call warnings in assembly files. Fix these
warnings by annotating those symbols with the SYM_FUNC_START_LOCAL and
SYM_FUNC_END macros, in order to set the symbol type to STT_FUNC and the
symbol size accordingly.
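
For illustration, the annotation pattern applied by this patch looks
like the following minimal sketch ("my_helper" is a hypothetical label
used only for this example, not a symbol from the patch):

	#include <linux/linkage.h>

	/*
	 * A bare "my_helper:" label yields an STT_NOTYPE symbol of
	 * size 0, which objtool does not add to its rbtree, so a
	 * local "bl my_helper" cannot be resolved. The macros below
	 * emit .type/.size directives, making the symbol STT_FUNC
	 * with a correct size.
	 */
	SYM_FUNC_START_LOCAL(my_helper)
		mflr	r0	/* save the link register */
		mtlr	r0	/* restore it */
		blr		/* plain blr return */
	SYM_FUNC_END(my_helper)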
Tested-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Reviewed-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Sathvika Vasireddy <sv@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20221114175754.1131267-4-sv@linux.ibm.com
Parent 01f2cf0b
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -13,6 +13,7 @@
  *
  */
 
+#include <linux/linkage.h>
 #include <asm/hw_irq.h>
 #include <asm/exception-64s.h>
 #include <asm/ptrace.h>
@@ -3112,7 +3113,7 @@ _GLOBAL(enable_machine_check)
 	blr
 
 /* MSR[RI] should be clear because this uses SRR[01] */
-disable_machine_check:
+SYM_FUNC_START_LOCAL(disable_machine_check)
 	mflr	r0
 	bcl	20,31,$+4
 0:	mflr	r3
@@ -3125,3 +3126,4 @@ disable_machine_check:
 	RFI_TO_KERNEL
 1:	mtlr	r0
 	blr
+SYM_FUNC_END(disable_machine_check)
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -18,6 +18,7 @@
  * variants.
  */
 
+#include <linux/linkage.h>
 #include <linux/threads.h>
 #include <linux/init.h>
 #include <asm/reg.h>
@@ -462,7 +463,7 @@ generic_secondary_common_init:
  * Assumes we're mapped EA == RA if the MMU is on.
  */
 #ifdef CONFIG_PPC_BOOK3S
-__mmu_off:
+SYM_FUNC_START_LOCAL(__mmu_off)
 	mfmsr	r3
 	andi.	r0,r3,MSR_IR|MSR_DR
 	beqlr
@@ -473,6 +474,7 @@ __mmu_off:
 	sync
 	rfid
 	b	.	/* prevent speculative execution */
+SYM_FUNC_END(__mmu_off)
 #endif
@@ -869,7 +871,7 @@ _GLOBAL(start_secondary_resume)
 /*
  * This subroutine clobbers r11 and r12
  */
-enable_64b_mode:
+SYM_FUNC_START_LOCAL(enable_64b_mode)
 	mfmsr	r11			/* grab the current MSR */
 #ifdef CONFIG_PPC_BOOK3E_64
 	oris	r11,r11,0x8000		/* CM bit set, we'll set ICM later */
@@ -881,6 +883,7 @@ enable_64b_mode:
 	isync
 #endif
 	blr
+SYM_FUNC_END(enable_64b_mode)
 
 /*
  * This puts the TOC pointer into r2, offset by 0x8000 (as expected
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -9,6 +9,7 @@
  * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
  */
 
+#include <linux/linkage.h>
 #include <linux/sys.h>
 #include <asm/unistd.h>
 #include <asm/errno.h>
@@ -353,7 +354,7 @@ _GLOBAL(kexec_smp_wait)
  *
  * don't overwrite r3 here, it is live for kexec_wait above.
  */
-real_mode:	/* assume normal blr return */
+SYM_FUNC_START_LOCAL(real_mode)	/* assume normal blr return */
 #ifdef CONFIG_PPC_BOOK3E_64
 	/* Create an identity mapping. */
 	b	kexec_create_tlb
@@ -370,6 +371,7 @@ real_mode:	/* assume normal blr return */
 	mtspr	SPRN_SRR0,r11
 	rfid
 #endif
+SYM_FUNC_END(real_mode)
 
 /*
  * kexec_sequence(newstack, start, image, control, clear_all(),
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -1,4 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/linkage.h>
 #include <asm/processor.h>
 #include <asm/ppc_asm.h>
 #include <asm/reg.h>
@@ -185,7 +186,7 @@ fphalf:
  * Internal routine to enable floating point and set FPSCR to 0.
  * Don't call it from C; it doesn't use the normal calling convention.
  */
-fpenable:
+SYM_FUNC_START_LOCAL(fpenable)
 #ifdef CONFIG_PPC32
 	stwu	r1,-64(r1)
 #else
@@ -202,6 +203,7 @@ fpenable:
 	mffs	fr31
 	MTFSF_L(fr1)
 	blr
+SYM_FUNC_END(fpenable)
 
 fpdisable:
 	mtlr	r12
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -9,6 +9,7 @@
  * Authors: Alexander Graf <agraf@suse.de>
  */
 
+#include <linux/linkage.h>
 #include <asm/ppc_asm.h>
 #include <asm/kvm_asm.h>
 #include <asm/reg.h>
@@ -107,7 +108,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 /*
  * void kvmhv_save_host_pmu(void)
  */
-kvmhv_save_host_pmu:
+SYM_FUNC_START_LOCAL(kvmhv_save_host_pmu)
 BEGIN_FTR_SECTION
 	/* Work around P8 PMAE bug */
 	li	r3, -1
@@ -154,3 +155,4 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	stw	r8, HSTATE_PMC5(r13)
 	stw	r9, HSTATE_PMC6(r13)
 31:	blr
+SYM_FUNC_END(kvmhv_save_host_pmu)
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -10,6 +10,7 @@
  * Authors: Alexander Graf <agraf@suse.de>
  */
 
+#include <linux/linkage.h>
 #include <asm/ppc_asm.h>
 #include <asm/code-patching-asm.h>
 #include <asm/kvm_asm.h>
@@ -2358,7 +2359,7 @@ hmi_realmode:
  * This routine calls kvmppc_read_intr, a C function, if an external
  * interrupt is pending.
  */
-kvmppc_check_wake_reason:
+SYM_FUNC_START_LOCAL(kvmppc_check_wake_reason)
 	mfspr	r6, SPRN_SRR1
 BEGIN_FTR_SECTION
 	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
@@ -2427,6 +2428,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	addi	r1, r1, PPC_MIN_STKFRM
 	mtlr	r0
 	blr
+SYM_FUNC_END(kvmppc_check_wake_reason)
 
 /*
  * Save away FP, VMX and VSX registers.
@@ -2434,7 +2436,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
  * N.B. r30 and r31 are volatile across this function,
  * thus it is not callable from C.
  */
-kvmppc_save_fp:
+SYM_FUNC_START_LOCAL(kvmppc_save_fp)
 	mflr	r30
 	mr	r31,r3
 	mfmsr	r5
@@ -2462,6 +2464,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	stw	r6,VCPU_VRSAVE(r31)
 	mtlr	r30
 	blr
+SYM_FUNC_END(kvmppc_save_fp)
 
 /*
  * Load up FP, VMX and VSX registers
@@ -2469,7 +2472,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
  * N.B. r30 and r31 are volatile across this function,
  * thus it is not callable from C.
  */
-kvmppc_load_fp:
+SYM_FUNC_START_LOCAL(kvmppc_load_fp)
 	mflr	r30
 	mr	r31,r4
 	mfmsr	r9
@@ -2498,6 +2501,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	mtlr	r30
 	mr	r4,r31
 	blr
+SYM_FUNC_END(kvmppc_load_fp)
 
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 /*
@@ -2746,7 +2750,7 @@ kvmppc_bad_host_intr:
  * r9 has a vcpu pointer (in)
  * r0 is used as a scratch register
  */
-kvmppc_msr_interrupt:
+SYM_FUNC_START_LOCAL(kvmppc_msr_interrupt)
 	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
 	cmpwi	r0, 2 /* Check if we are in transactional state..  */
 	ld	r11, VCPU_INTR_MSR(r9)
@@ -2755,13 +2759,14 @@ kvmppc_msr_interrupt:
 	li	r0, 1
 1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
 	blr
+SYM_FUNC_END(kvmppc_msr_interrupt)
 
 /*
  * void kvmhv_load_guest_pmu(struct kvm_vcpu *vcpu)
  *
  * Load up guest PMU state.  R3 points to the vcpu struct.
  */
-kvmhv_load_guest_pmu:
+SYM_FUNC_START_LOCAL(kvmhv_load_guest_pmu)
 	mr	r4, r3
 	mflr	r0
 	li	r3, 1
@@ -2811,13 +2816,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	isync
 	mtlr	r0
 	blr
+SYM_FUNC_END(kvmhv_load_guest_pmu)
 
 /*
  * void kvmhv_load_host_pmu(void)
  *
  * Reload host PMU state saved in the PACA by kvmhv_save_host_pmu.
  */
-kvmhv_load_host_pmu:
+SYM_FUNC_START_LOCAL(kvmhv_load_host_pmu)
 	mflr	r0
 	lbz	r4, PACA_PMCINUSE(r13) /* is the host using the PMU? */
 	cmpwi	r4, 0
@@ -2859,6 +2865,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	isync
 	mtlr	r0
 23:	blr
+SYM_FUNC_END(kvmhv_load_host_pmu)
 
 /*
  * void kvmhv_save_guest_pmu(struct kvm_vcpu *vcpu, bool pmu_in_use)
@@ -2866,7 +2873,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
  * Save guest PMU state into the vcpu struct.
  * r3 = vcpu, r4 = full save flag (PMU in use flag set in VPA)
  */
-kvmhv_save_guest_pmu:
+SYM_FUNC_START_LOCAL(kvmhv_save_guest_pmu)
 	mr	r9, r3
 	mr	r8, r4
 BEGIN_FTR_SECTION
@@ -2942,6 +2949,7 @@ BEGIN_FTR_SECTION
 	mtspr	SPRN_MMCRS, r4
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 22:	blr
+SYM_FUNC_END(kvmhv_save_guest_pmu)
 
 /*
  * This works around a hardware bug on POWER8E processors, where