Commit 131484c8 authored by Ingo Molnar

x86/debug: Remove perpetually broken, unmaintainable dwarf annotations

So the dwarf2 annotations in low-level assembly code have
become an increasing hindrance: unreadable, messy macros
mixed into some of the most security-sensitive code paths
of the Linux kernel.

These debug info annotations don't even buy the upstream
kernel anything: dwarf-driven stack unwinding has caused
problems in the past, so it's out of tree, and the upstream
kernel only uses the much more robust frame-pointer based
stack unwinding method.

In addition to that there's a steady, slow bitrot going
on with these annotations, requiring frequent fixups.
There's no tooling and no functionality upstream that
keeps them correct.

So burn down the sick forest, allowing new, healthier growth:

   27 files changed, 350 insertions(+), 1101 deletions(-)

Someone who has the willingness and time to do this
properly can attempt to reintroduce dwarf debuginfo in x86
assembly code plus dwarf unwinding from first principles,
with the following conditions:

 - it should be maximally readable, and maximally low-key to
   'ordinary' code reading and maintenance.

 - find a build-time method to insert dwarf annotations
   automatically in the most common cases, for push/pop
   instructions that manipulate the stack pointer. This could
   be done for example via a preprocessing step that just
   looks for common patterns - plus special annotations for
   the few cases where we want to depart from the default.
   We have hundreds of CFI annotations, so automating most of
   that makes sense (see the sketch after this list).

 - it should come with build tooling checks that ensure that
   CFI annotations are sensible. We've seen such efforts from
   the frame-pointer side, and there's no reason it couldn't be
   done on the dwarf side.
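
As an illustration of the automation idea above, here is a minimal
sketch (not actual tooling) of the kind of annotated output such a
preprocessing step could emit for a plain push/pop pair; the .cfi_*
pseudo-ops are standard GNU as directives and the function name is
hypothetical:

	.text
	.globl	cfi_demo
	.type	cfi_demo, @function
	cfi_demo:
	.cfi_startproc
	pushq	%rbp
	.cfi_adjust_cfa_offset 8	# auto-inserted: stack grew one word
	.cfi_rel_offset rbp, 0		# auto-inserted: %rbp saved at new top
	movq	%rsp, %rbp
	popq	%rbp
	.cfi_adjust_cfa_offset -8	# auto-inserted: stack shrank back
	.cfi_restore rbp		# auto-inserted: %rbp is live again
	ret
	.cfi_endproc
	.size	cfi_demo, .-cfi_demo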

Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Frédéric Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jan Beulich <JBeulich@suse.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent cdeb6048
......@@ -149,12 +149,6 @@ endif
sp-$(CONFIG_X86_32) := esp
sp-$(CONFIG_X86_64) := rsp
# does binutils support CFI?
cfi := $(call as-instr,.cfi_startproc\n.cfi_rel_offset $(sp-y)$(comma)0\n.cfi_endproc,-DCONFIG_AS_CFI=1)
# is .cfi_signal_frame supported too?
cfi-sigframe := $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1)
cfi-sections := $(call as-instr,.cfi_sections .debug_frame,-DCONFIG_AS_CFI_SECTIONS=1)
# does binutils support specific instructions?
asinstr := $(call as-instr,fxsaveq (%rax),-DCONFIG_AS_FXSAVEQ=1)
asinstr += $(call as-instr,pshufb %xmm0$(comma)%xmm0,-DCONFIG_AS_SSSE3=1)
......@@ -162,8 +156,8 @@ asinstr += $(call as-instr,crc32l %eax$(comma)%eax,-DCONFIG_AS_CRC32=1)
avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1)
avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1)
KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr)
KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr)
KBUILD_AFLAGS += $(asinstr) $(avx_instr) $(avx2_instr)
KBUILD_CFLAGS += $(asinstr) $(avx_instr) $(avx2_instr)
LDFLAGS := -m elf_$(UTS_MACHINE)
......
......@@ -4,7 +4,6 @@
* Copyright 2000-2002 Andi Kleen, SuSE Labs.
*/
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/current.h>
......@@ -60,17 +59,6 @@
movl %eax,%eax /* zero extension */
.endm
.macro CFI_STARTPROC32 simple
CFI_STARTPROC \simple
CFI_UNDEFINED r8
CFI_UNDEFINED r9
CFI_UNDEFINED r10
CFI_UNDEFINED r11
CFI_UNDEFINED r12
CFI_UNDEFINED r13
CFI_UNDEFINED r14
CFI_UNDEFINED r15
.endm
#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret32)
......@@ -102,11 +90,6 @@ ENDPROC(native_usergs_sysret32)
* with the int 0x80 path.
*/
ENTRY(ia32_sysenter_target)
CFI_STARTPROC32 simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA rsp,0
CFI_REGISTER rsp,rbp
/*
* Interrupts are off on entry.
* We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
......@@ -121,25 +104,21 @@ ENTRY(ia32_sysenter_target)
movl %eax, %eax
movl ASM_THREAD_INFO(TI_sysenter_return, %rsp, 0), %r10d
CFI_REGISTER rip,r10
/* Construct struct pt_regs on stack */
pushq_cfi $__USER32_DS /* pt_regs->ss */
pushq_cfi %rbp /* pt_regs->sp */
CFI_REL_OFFSET rsp,0
pushfq_cfi /* pt_regs->flags */
pushq_cfi $__USER32_CS /* pt_regs->cs */
pushq_cfi %r10 /* pt_regs->ip = thread_info->sysenter_return */
CFI_REL_OFFSET rip,0
pushq_cfi_reg rax /* pt_regs->orig_ax */
pushq_cfi_reg rdi /* pt_regs->di */
pushq_cfi_reg rsi /* pt_regs->si */
pushq_cfi_reg rdx /* pt_regs->dx */
pushq_cfi_reg rcx /* pt_regs->cx */
pushq_cfi $-ENOSYS /* pt_regs->ax */
pushq $__USER32_DS /* pt_regs->ss */
pushq %rbp /* pt_regs->sp */
pushfq /* pt_regs->flags */
pushq $__USER32_CS /* pt_regs->cs */
pushq %r10 /* pt_regs->ip = thread_info->sysenter_return */
pushq %rax /* pt_regs->orig_ax */
pushq %rdi /* pt_regs->di */
pushq %rsi /* pt_regs->si */
pushq %rdx /* pt_regs->dx */
pushq %rcx /* pt_regs->cx */
pushq $-ENOSYS /* pt_regs->ax */
cld
sub $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
CFI_ADJUST_CFA_OFFSET 10*8
/*
* no need to do an access_ok check here because rbp has been
......@@ -161,8 +140,8 @@ sysenter_flags_fixed:
orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
CFI_REMEMBER_STATE
jnz sysenter_tracesys
sysenter_do_call:
/* 32bit syscall -> 64bit C ABI argument conversion */
movl %edi,%r8d /* arg5 */
......@@ -193,14 +172,12 @@ sysexit_from_sys_call:
*/
andl $~TS_COMPAT,ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
movl RIP(%rsp),%ecx /* User %eip */
CFI_REGISTER rip,rcx
RESTORE_RSI_RDI
xorl %edx,%edx /* avoid info leaks */
xorq %r8,%r8
xorq %r9,%r9
xorq %r10,%r10
movl EFLAGS(%rsp),%r11d /* User eflags */
/*CFI_RESTORE rflags*/
TRACE_IRQS_ON
/*
......@@ -231,8 +208,6 @@ sysexit_from_sys_call:
*/
USERGS_SYSRET32
CFI_RESTORE_STATE
#ifdef CONFIG_AUDITSYSCALL
.macro auditsys_entry_common
movl %esi,%r8d /* 5th arg: 4th syscall arg */
......@@ -282,8 +257,8 @@ sysexit_audit:
#endif
sysenter_fix_flags:
pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED)
popfq_cfi
pushq $(X86_EFLAGS_IF|X86_EFLAGS_FIXED)
popfq
jmp sysenter_flags_fixed
sysenter_tracesys:
......@@ -298,7 +273,6 @@ sysenter_tracesys:
LOAD_ARGS32 /* reload args from stack in case ptrace changed it */
RESTORE_EXTRA_REGS
jmp sysenter_do_call
CFI_ENDPROC
ENDPROC(ia32_sysenter_target)
/*
......@@ -332,12 +306,6 @@ ENDPROC(ia32_sysenter_target)
* with the int 0x80 path.
*/
ENTRY(ia32_cstar_target)
CFI_STARTPROC32 simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA rsp,0
CFI_REGISTER rip,rcx
/*CFI_REGISTER rflags,r11*/
/*
* Interrupts are off on entry.
* We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
......@@ -345,7 +313,6 @@ ENTRY(ia32_cstar_target)
*/
SWAPGS_UNSAFE_STACK
movl %esp,%r8d
CFI_REGISTER rsp,r8
movq PER_CPU_VAR(cpu_current_top_of_stack),%rsp
ENABLE_INTERRUPTS(CLBR_NONE)
......@@ -353,22 +320,19 @@ ENTRY(ia32_cstar_target)
movl %eax,%eax
/* Construct struct pt_regs on stack */
pushq_cfi $__USER32_DS /* pt_regs->ss */
pushq_cfi %r8 /* pt_regs->sp */
CFI_REL_OFFSET rsp,0
pushq_cfi %r11 /* pt_regs->flags */
pushq_cfi $__USER32_CS /* pt_regs->cs */
pushq_cfi %rcx /* pt_regs->ip */
CFI_REL_OFFSET rip,0
pushq_cfi_reg rax /* pt_regs->orig_ax */
pushq_cfi_reg rdi /* pt_regs->di */
pushq_cfi_reg rsi /* pt_regs->si */
pushq_cfi_reg rdx /* pt_regs->dx */
pushq_cfi_reg rbp /* pt_regs->cx */
pushq $__USER32_DS /* pt_regs->ss */
pushq %r8 /* pt_regs->sp */
pushq %r11 /* pt_regs->flags */
pushq $__USER32_CS /* pt_regs->cs */
pushq %rcx /* pt_regs->ip */
pushq %rax /* pt_regs->orig_ax */
pushq %rdi /* pt_regs->di */
pushq %rsi /* pt_regs->si */
pushq %rdx /* pt_regs->dx */
pushq %rbp /* pt_regs->cx */
movl %ebp,%ecx
pushq_cfi $-ENOSYS /* pt_regs->ax */
pushq $-ENOSYS /* pt_regs->ax */
sub $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
CFI_ADJUST_CFA_OFFSET 10*8
/*
* no need to do an access_ok check here because r8 has been
......@@ -380,8 +344,8 @@ ENTRY(ia32_cstar_target)
ASM_CLAC
orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
CFI_REMEMBER_STATE
jnz cstar_tracesys
cstar_do_call:
/* 32bit syscall -> 64bit C ABI argument conversion */
movl %edi,%r8d /* arg5 */
......@@ -403,15 +367,12 @@ sysretl_from_sys_call:
andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
RESTORE_RSI_RDI_RDX
movl RIP(%rsp),%ecx
CFI_REGISTER rip,rcx
movl EFLAGS(%rsp),%r11d
/*CFI_REGISTER rflags,r11*/
xorq %r10,%r10
xorq %r9,%r9
xorq %r8,%r8
TRACE_IRQS_ON
movl RSP(%rsp),%esp
CFI_RESTORE rsp
/*
* 64bit->32bit SYSRET restores eip from ecx,
* eflags from r11 (but RF and VM bits are forced to 0),
......@@ -430,7 +391,6 @@ sysretl_from_sys_call:
#ifdef CONFIG_AUDITSYSCALL
cstar_auditsys:
CFI_RESTORE_STATE
movl %r9d,R9(%rsp) /* register to be clobbered by call */
auditsys_entry_common
movl R9(%rsp),%r9d /* reload 6th syscall arg */
......@@ -460,7 +420,6 @@ ia32_badarg:
ASM_CLAC
movq $-EFAULT,%rax
jmp ia32_sysret
CFI_ENDPROC
/*
* Emulated IA32 system calls via int 0x80.
......@@ -484,15 +443,6 @@ ia32_badarg:
*/
ENTRY(ia32_syscall)
CFI_STARTPROC32 simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA rsp,5*8
/*CFI_REL_OFFSET ss,4*8 */
CFI_REL_OFFSET rsp,3*8
/*CFI_REL_OFFSET rflags,2*8 */
/*CFI_REL_OFFSET cs,1*8 */
CFI_REL_OFFSET rip,0*8
/*
* Interrupts are off on entry.
* We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
......@@ -506,15 +456,14 @@ ENTRY(ia32_syscall)
movl %eax,%eax
/* Construct struct pt_regs on stack (iret frame is already on stack) */
pushq_cfi_reg rax /* pt_regs->orig_ax */
pushq_cfi_reg rdi /* pt_regs->di */
pushq_cfi_reg rsi /* pt_regs->si */
pushq_cfi_reg rdx /* pt_regs->dx */
pushq_cfi_reg rcx /* pt_regs->cx */
pushq_cfi $-ENOSYS /* pt_regs->ax */
pushq %rax /* pt_regs->orig_ax */
pushq %rdi /* pt_regs->di */
pushq %rsi /* pt_regs->si */
pushq %rdx /* pt_regs->dx */
pushq %rcx /* pt_regs->cx */
pushq $-ENOSYS /* pt_regs->ax */
cld
sub $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
CFI_ADJUST_CFA_OFFSET 10*8
orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
......@@ -544,7 +493,6 @@ ia32_tracesys:
LOAD_ARGS32 /* reload args from stack in case ptrace changed it */
RESTORE_EXTRA_REGS
jmp ia32_do_call
CFI_ENDPROC
END(ia32_syscall)
.macro PTREGSCALL label, func
......@@ -554,8 +502,6 @@ GLOBAL(\label)
jmp ia32_ptregs_common
.endm
CFI_STARTPROC32
PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn
PTREGSCALL stub32_sigreturn, sys32_sigreturn
PTREGSCALL stub32_fork, sys_fork
......@@ -569,23 +515,8 @@ GLOBAL(stub32_clone)
ALIGN
ia32_ptregs_common:
CFI_ENDPROC
CFI_STARTPROC32 simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA rsp,SIZEOF_PTREGS
CFI_REL_OFFSET rax,RAX
CFI_REL_OFFSET rcx,RCX
CFI_REL_OFFSET rdx,RDX
CFI_REL_OFFSET rsi,RSI
CFI_REL_OFFSET rdi,RDI
CFI_REL_OFFSET rip,RIP
/* CFI_REL_OFFSET cs,CS*/
/* CFI_REL_OFFSET rflags,EFLAGS*/
CFI_REL_OFFSET rsp,RSP
/* CFI_REL_OFFSET ss,SS*/
SAVE_EXTRA_REGS 8
call *%rax
RESTORE_EXTRA_REGS 8
ret
CFI_ENDPROC
END(ia32_ptregs_common)
......@@ -46,8 +46,6 @@ For 32-bit we have the following conventions - kernel is built with
*/
#include <asm/dwarf2.h>
#ifdef CONFIG_X86_64
/*
......@@ -92,27 +90,26 @@ For 32-bit we have the following conventions - kernel is built with
.macro ALLOC_PT_GPREGS_ON_STACK addskip=0
subq $15*8+\addskip, %rsp
CFI_ADJUST_CFA_OFFSET 15*8+\addskip
.endm
.macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1
.if \r11
movq_cfi r11, 6*8+\offset
movq %r11, 6*8+\offset(%rsp)
.endif
.if \r8910
movq_cfi r10, 7*8+\offset
movq_cfi r9, 8*8+\offset
movq_cfi r8, 9*8+\offset
movq %r10, 7*8+\offset(%rsp)
movq %r9, 8*8+\offset(%rsp)
movq %r8, 9*8+\offset(%rsp)
.endif
.if \rax
movq_cfi rax, 10*8+\offset
movq %rax, 10*8+\offset(%rsp)
.endif
.if \rcx
movq_cfi rcx, 11*8+\offset
movq %rcx, 11*8+\offset(%rsp)
.endif
movq_cfi rdx, 12*8+\offset
movq_cfi rsi, 13*8+\offset
movq_cfi rdi, 14*8+\offset
movq %rdx, 12*8+\offset(%rsp)
movq %rsi, 13*8+\offset(%rsp)
movq %rdi, 14*8+\offset(%rsp)
.endm
.macro SAVE_C_REGS offset=0
SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
......@@ -131,24 +128,24 @@ For 32-bit we have the following conventions - kernel is built with
.endm
.macro SAVE_EXTRA_REGS offset=0
movq_cfi r15, 0*8+\offset
movq_cfi r14, 1*8+\offset
movq_cfi r13, 2*8+\offset
movq_cfi r12, 3*8+\offset
movq_cfi rbp, 4*8+\offset
movq_cfi rbx, 5*8+\offset
movq %r15, 0*8+\offset(%rsp)
movq %r14, 1*8+\offset(%rsp)
movq %r13, 2*8+\offset(%rsp)
movq %r12, 3*8+\offset(%rsp)
movq %rbp, 4*8+\offset(%rsp)
movq %rbx, 5*8+\offset(%rsp)
.endm
.macro SAVE_EXTRA_REGS_RBP offset=0
movq_cfi rbp, 4*8+\offset
movq %rbp, 4*8+\offset(%rsp)
.endm
.macro RESTORE_EXTRA_REGS offset=0
movq_cfi_restore 0*8+\offset, r15
movq_cfi_restore 1*8+\offset, r14
movq_cfi_restore 2*8+\offset, r13
movq_cfi_restore 3*8+\offset, r12
movq_cfi_restore 4*8+\offset, rbp
movq_cfi_restore 5*8+\offset, rbx
movq 0*8+\offset(%rsp), %r15
movq 1*8+\offset(%rsp), %r14
movq 2*8+\offset(%rsp), %r13
movq 3*8+\offset(%rsp), %r12
movq 4*8+\offset(%rsp), %rbp
movq 5*8+\offset(%rsp), %rbx
.endm
.macro ZERO_EXTRA_REGS
......@@ -162,24 +159,24 @@ For 32-bit we have the following conventions - kernel is built with
.macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1
.if \rstor_r11
movq_cfi_restore 6*8, r11
movq 6*8(%rsp), %r11
.endif
.if \rstor_r8910
movq_cfi_restore 7*8, r10
movq_cfi_restore 8*8, r9
movq_cfi_restore 9*8, r8
movq 7*8(%rsp), %r10
movq 8*8(%rsp), %r9
movq 9*8(%rsp), %r8
.endif
.if \rstor_rax
movq_cfi_restore 10*8, rax
movq 10*8(%rsp), %rax
.endif
.if \rstor_rcx
movq_cfi_restore 11*8, rcx
movq 11*8(%rsp), %rcx
.endif
.if \rstor_rdx
movq_cfi_restore 12*8, rdx
movq 12*8(%rsp), %rdx
.endif
movq_cfi_restore 13*8, rsi
movq_cfi_restore 14*8, rdi
movq 13*8(%rsp), %rsi
movq 14*8(%rsp), %rdi
.endm
.macro RESTORE_C_REGS
RESTORE_C_REGS_HELPER 1,1,1,1,1
......@@ -205,7 +202,6 @@ For 32-bit we have the following conventions - kernel is built with
.macro REMOVE_PT_GPREGS_FROM_STACK addskip=0
addq $15*8+\addskip, %rsp
CFI_ADJUST_CFA_OFFSET -(15*8+\addskip)
.endm
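
/*
 * Taken together, an entry path composes these helpers roughly like
 * this (an illustrative fragment; the paranoid_entry/paranoid_exit
 * paths later in this diff follow the same pattern):
 *
 *	ALLOC_PT_GPREGS_ON_STACK	# rsp -= 15*8, room for the GP regs
 *	SAVE_C_REGS			# spill caller-clobbered registers
 *	SAVE_EXTRA_REGS			# spill callee-saved registers
 *	# ... C handler runs with a complete pt_regs at %rsp ...
 *	RESTORE_EXTRA_REGS
 *	RESTORE_C_REGS
 *	REMOVE_PT_GPREGS_FROM_STACK	# rsp += 15*8
 */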
.macro icebp
......@@ -224,23 +220,23 @@ For 32-bit we have the following conventions - kernel is built with
*/
.macro SAVE_ALL
pushl_cfi_reg eax
pushl_cfi_reg ebp
pushl_cfi_reg edi
pushl_cfi_reg esi
pushl_cfi_reg edx
pushl_cfi_reg ecx
pushl_cfi_reg ebx
pushl %eax
pushl %ebp
pushl %edi
pushl %esi
pushl %edx
pushl %ecx
pushl %ebx
.endm
.macro RESTORE_ALL
popl_cfi_reg ebx
popl_cfi_reg ecx
popl_cfi_reg edx
popl_cfi_reg esi
popl_cfi_reg edi
popl_cfi_reg ebp
popl_cfi_reg eax
popl %ebx
popl %ecx
popl %edx
popl %esi
popl %edi
popl %ebp
popl %eax
.endm
#endif /* CONFIG_X86_64 */
......
#ifndef _ASM_X86_DWARF2_H
#define _ASM_X86_DWARF2_H
#ifndef __ASSEMBLY__
#warning "asm/dwarf2.h should be only included in pure assembly files"
#endif
/*
* Macros for dwarf2 CFI unwind table entries.
* See "as.info" for details on these pseudo ops. Unfortunately
* they are only supported in very new binutils, so define them
* away for older versions.
*/
#ifdef CONFIG_AS_CFI
#define CFI_STARTPROC .cfi_startproc
#define CFI_ENDPROC .cfi_endproc
#define CFI_DEF_CFA .cfi_def_cfa
#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register
#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset
#define CFI_OFFSET .cfi_offset
#define CFI_REL_OFFSET .cfi_rel_offset
#define CFI_REGISTER .cfi_register
#define CFI_RESTORE .cfi_restore
#define CFI_REMEMBER_STATE .cfi_remember_state
#define CFI_RESTORE_STATE .cfi_restore_state
#define CFI_UNDEFINED .cfi_undefined
#define CFI_ESCAPE .cfi_escape
#ifdef CONFIG_AS_CFI_SIGNAL_FRAME
#define CFI_SIGNAL_FRAME .cfi_signal_frame
#else
#define CFI_SIGNAL_FRAME
#endif
#if defined(CONFIG_AS_CFI_SECTIONS) && defined(__ASSEMBLY__)
/*
* Emit CFI data in .debug_frame sections, not .eh_frame sections.
* The latter we currently just discard since we don't do DWARF
* unwinding at runtime. So only the offline DWARF information is
* useful to anyone. Note we should not use this directive if this
* file is used in the vDSO assembly, or if vmlinux.lds.S gets
* changed so it doesn't discard .eh_frame.
*/
.cfi_sections .debug_frame
#endif
#else
/*
* Due to the structure of pre-existing code, don't use assembler line
* comment character # to ignore the arguments. Instead, use a dummy macro.
*/
.macro cfi_ignore a=0, b=0, c=0, d=0
.endm
#define CFI_STARTPROC cfi_ignore
#define CFI_ENDPROC cfi_ignore
#define CFI_DEF_CFA cfi_ignore
#define CFI_DEF_CFA_REGISTER cfi_ignore
#define CFI_DEF_CFA_OFFSET cfi_ignore
#define CFI_ADJUST_CFA_OFFSET cfi_ignore
#define CFI_OFFSET cfi_ignore
#define CFI_REL_OFFSET cfi_ignore
#define CFI_REGISTER cfi_ignore
#define CFI_RESTORE cfi_ignore
#define CFI_REMEMBER_STATE cfi_ignore
#define CFI_RESTORE_STATE cfi_ignore
#define CFI_UNDEFINED cfi_ignore
#define CFI_ESCAPE cfi_ignore
#define CFI_SIGNAL_FRAME cfi_ignore
#endif
/*
* An attempt to make CFI annotations more or less
* correct and shorter. It is implied that you know
* what you're doing if you use them.
*/
#ifdef __ASSEMBLY__
#ifdef CONFIG_X86_64
.macro pushq_cfi reg
pushq \reg
CFI_ADJUST_CFA_OFFSET 8
.endm
.macro pushq_cfi_reg reg
pushq %\reg
CFI_ADJUST_CFA_OFFSET 8
CFI_REL_OFFSET \reg, 0
.endm
.macro popq_cfi reg
popq \reg
CFI_ADJUST_CFA_OFFSET -8
.endm
.macro popq_cfi_reg reg
popq %\reg
CFI_ADJUST_CFA_OFFSET -8
CFI_RESTORE \reg
.endm
.macro pushfq_cfi
pushfq
CFI_ADJUST_CFA_OFFSET 8
.endm
.macro popfq_cfi
popfq
CFI_ADJUST_CFA_OFFSET -8
.endm
.macro movq_cfi reg offset=0
movq %\reg, \offset(%rsp)
CFI_REL_OFFSET \reg, \offset
.endm
.macro movq_cfi_restore offset reg
movq \offset(%rsp), %\reg
CFI_RESTORE \reg
.endm
#else /*!CONFIG_X86_64*/
.macro pushl_cfi reg
pushl \reg
CFI_ADJUST_CFA_OFFSET 4
.endm
.macro pushl_cfi_reg reg
pushl %\reg
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET \reg, 0
.endm
.macro popl_cfi reg
popl \reg
CFI_ADJUST_CFA_OFFSET -4
.endm
.macro popl_cfi_reg reg
popl %\reg
CFI_ADJUST_CFA_OFFSET -4
CFI_RESTORE \reg
.endm
.macro pushfl_cfi
pushfl
CFI_ADJUST_CFA_OFFSET 4
.endm
.macro popfl_cfi
popfl
CFI_ADJUST_CFA_OFFSET -4
.endm
.macro movl_cfi reg offset=0
movl %\reg, \offset(%esp)
CFI_REL_OFFSET \reg, \offset
.endm
.macro movl_cfi_restore offset reg
movl \offset(%esp), %\reg
CFI_RESTORE \reg
.endm
#endif /*!CONFIG_X86_64*/
#endif /*__ASSEMBLY__*/
#endif /* _ASM_X86_DWARF2_H */
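
/*
 * Hand-expanded sketch of the helpers above: with CONFIG_AS_CFI set,
 *
 *	pushq_cfi_reg rbx
 *
 * emits
 *
 *	pushq	%rbx
 *	.cfi_adjust_cfa_offset 8
 *	.cfi_rel_offset rbx, 0
 *
 * while without it the CFI_* names expand to the cfi_ignore dummy
 * macro and only the bare pushq remains.
 */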
#ifdef __ASSEMBLY__
#include <asm/asm.h>
#include <asm/dwarf2.h>
/* The annotation hides the frame from the unwinder and makes it look
like an ordinary ebp save/restore. This avoids some special cases for
the frame pointer later */
#ifdef CONFIG_FRAME_POINTER
.macro FRAME
__ASM_SIZE(push,_cfi) %__ASM_REG(bp)
CFI_REL_OFFSET __ASM_REG(bp), 0
__ASM_SIZE(push,) %__ASM_REG(bp)
__ASM_SIZE(mov) %__ASM_REG(sp), %__ASM_REG(bp)
.endm
.macro ENDFRAME
__ASM_SIZE(pop,_cfi) %__ASM_REG(bp)
CFI_RESTORE __ASM_REG(bp)
__ASM_SIZE(pop,) %__ASM_REG(bp)
.endm
#else
.macro FRAME
......
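
A sketch of how the FRAME/ENDFRAME pair above is meant to wrap a
routine (the function and callee names here are hypothetical; this
assumes x86-64 with CONFIG_FRAME_POINTER=y, where the __ASM_SIZE and
__ASM_REG selectors resolve to pushq/popq and %rbp):

	ENTRY(frame_demo)
	FRAME				# pushq %rbp; movq %rsp, %rbp
	call	helper_func		# body is visible to the frame-pointer unwinder
	ENDFRAME			# popq %rbp
	ret
	ENDPROC(frame_demo)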
......@@ -50,7 +50,6 @@
#include <asm/smp.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/dwarf2.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
......@@ -113,11 +112,10 @@
/* unfortunately push/pop can't be a no-op */
.macro PUSH_GS
pushl_cfi $0
pushl $0
.endm
.macro POP_GS pop=0
addl $(4 + \pop), %esp
CFI_ADJUST_CFA_OFFSET -(4 + \pop)
.endm
.macro POP_GS_EX
.endm
......@@ -137,16 +135,13 @@
#else /* CONFIG_X86_32_LAZY_GS */
.macro PUSH_GS
pushl_cfi %gs
/*CFI_REL_OFFSET gs, 0*/
pushl %gs
.endm
.macro POP_GS pop=0
98: popl_cfi %gs
/*CFI_RESTORE gs*/
98: popl %gs
.if \pop <> 0
add $\pop, %esp
CFI_ADJUST_CFA_OFFSET -\pop
.endif
.endm
.macro POP_GS_EX
......@@ -170,11 +165,9 @@
.macro GS_TO_REG reg
movl %gs, \reg
/*CFI_REGISTER gs, \reg*/
.endm
.macro REG_TO_PTGS reg
movl \reg, PT_GS(%esp)
/*CFI_REL_OFFSET gs, PT_GS*/
.endm
.macro SET_KERNEL_GS reg
movl $(__KERNEL_STACK_CANARY), \reg
......@@ -186,26 +179,16 @@
.macro SAVE_ALL
cld
PUSH_GS
pushl_cfi %fs
/*CFI_REL_OFFSET fs, 0;*/
pushl_cfi %es
/*CFI_REL_OFFSET es, 0;*/
pushl_cfi %ds
/*CFI_REL_OFFSET ds, 0;*/
pushl_cfi %eax
CFI_REL_OFFSET eax, 0
pushl_cfi %ebp
CFI_REL_OFFSET ebp, 0
pushl_cfi %edi
CFI_REL_OFFSET edi, 0
pushl_cfi %esi
CFI_REL_OFFSET esi, 0
pushl_cfi %edx
CFI_REL_OFFSET edx, 0
pushl_cfi %ecx
CFI_REL_OFFSET ecx, 0
pushl_cfi %ebx
CFI_REL_OFFSET ebx, 0
pushl %fs
pushl %es
pushl %ds
pushl %eax
pushl %ebp
pushl %edi
pushl %esi
pushl %edx
pushl %ecx
pushl %ebx
movl $(__USER_DS), %edx
movl %edx, %ds
movl %edx, %es
......@@ -215,30 +198,20 @@
.endm
.macro RESTORE_INT_REGS
popl_cfi %ebx
CFI_RESTORE ebx
popl_cfi %ecx
CFI_RESTORE ecx
popl_cfi %edx
CFI_RESTORE edx
popl_cfi %esi
CFI_RESTORE esi
popl_cfi %edi
CFI_RESTORE edi
popl_cfi %ebp
CFI_RESTORE ebp
popl_cfi %eax
CFI_RESTORE eax
popl %ebx
popl %ecx
popl %edx
popl %esi
popl %edi
popl %ebp
popl %eax
.endm
.macro RESTORE_REGS pop=0
RESTORE_INT_REGS
1: popl_cfi %ds
/*CFI_RESTORE ds;*/
2: popl_cfi %es
/*CFI_RESTORE es;*/
3: popl_cfi %fs
/*CFI_RESTORE fs;*/
1: popl %ds
2: popl %es
3: popl %fs
POP_GS \pop
.pushsection .fixup, "ax"
4: movl $0, (%esp)
......@@ -254,64 +227,27 @@
POP_GS_EX
.endm
.macro RING0_INT_FRAME
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA esp, 3*4
/*CFI_OFFSET cs, -2*4;*/
CFI_OFFSET eip, -3*4
.endm
.macro RING0_EC_FRAME
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA esp, 4*4
/*CFI_OFFSET cs, -2*4;*/
CFI_OFFSET eip, -3*4
.endm
.macro RING0_PTREGS_FRAME
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA esp, PT_OLDESP-PT_EBX
/*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/
CFI_OFFSET eip, PT_EIP-PT_OLDESP
/*CFI_OFFSET es, PT_ES-PT_OLDESP;*/
/*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/
CFI_OFFSET eax, PT_EAX-PT_OLDESP
CFI_OFFSET ebp, PT_EBP-PT_OLDESP
CFI_OFFSET edi, PT_EDI-PT_OLDESP
CFI_OFFSET esi, PT_ESI-PT_OLDESP
CFI_OFFSET edx, PT_EDX-PT_OLDESP
CFI_OFFSET ecx, PT_ECX-PT_OLDESP
CFI_OFFSET ebx, PT_EBX-PT_OLDESP
.endm
ENTRY(ret_from_fork)
CFI_STARTPROC
pushl_cfi %eax
pushl %eax
call schedule_tail
GET_THREAD_INFO(%ebp)
popl_cfi %eax
pushl_cfi $0x0202 # Reset kernel eflags
popfl_cfi
popl %eax
pushl $0x0202 # Reset kernel eflags
popfl
jmp syscall_exit
CFI_ENDPROC
END(ret_from_fork)
ENTRY(ret_from_kernel_thread)
CFI_STARTPROC
pushl_cfi %eax
pushl %eax
call schedule_tail
GET_THREAD_INFO(%ebp)
popl_cfi %eax
pushl_cfi $0x0202 # Reset kernel eflags
popfl_cfi
popl %eax
pushl $0x0202 # Reset kernel eflags
popfl
movl PT_EBP(%esp),%eax
call *PT_EBX(%esp)
movl $0,PT_EAX(%esp)
jmp syscall_exit
CFI_ENDPROC
ENDPROC(ret_from_kernel_thread)
/*
......@@ -323,7 +259,6 @@ ENDPROC(ret_from_kernel_thread)
# userspace resumption stub bypassing syscall exit tracing
ALIGN
RING0_PTREGS_FRAME
ret_from_exception:
preempt_stop(CLBR_ANY)
ret_from_intr:
......@@ -367,17 +302,12 @@ need_resched:
jmp need_resched
END(resume_kernel)
#endif
CFI_ENDPROC
/* SYSENTER_RETURN points to after the "sysenter" instruction in
the vsyscall page. See vsyscall-sysenter.S, which defines the symbol. */
# sysenter call handler stub
ENTRY(ia32_sysenter_target)
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA esp, 0
CFI_REGISTER esp, ebp
movl TSS_sysenter_sp0(%esp),%esp
sysenter_past_esp:
/*
......@@ -385,14 +315,11 @@ sysenter_past_esp:
* enough kernel state to call TRACE_IRQS_OFF can be called - but
* we immediately enable interrupts at that point anyway.
*/
pushl_cfi $__USER_DS
/*CFI_REL_OFFSET ss, 0*/
pushl_cfi %ebp
CFI_REL_OFFSET esp, 0
pushfl_cfi
pushl $__USER_DS
pushl %ebp
pushfl
orl $X86_EFLAGS_IF, (%esp)
pushl_cfi $__USER_CS
/*CFI_REL_OFFSET cs, 0*/
pushl $__USER_CS
/*
* Push current_thread_info()->sysenter_return to the stack.
* A tiny bit of offset fixup is necessary: TI_sysenter_return
......@@ -401,10 +328,9 @@ sysenter_past_esp:
* TOP_OF_KERNEL_STACK_PADDING takes us to the top of the stack;
* and THREAD_SIZE takes us to the bottom.
*/
pushl_cfi ((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp)
CFI_REL_OFFSET eip, 0
pushl ((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp)
pushl_cfi %eax
pushl %eax
SAVE_ALL
ENABLE_INTERRUPTS(CLBR_NONE)
......@@ -453,11 +379,11 @@ sysenter_audit:
/* movl PT_EAX(%esp), %eax already set, syscall number: 1st arg to audit */
movl PT_EBX(%esp), %edx /* ebx/a0: 2nd arg to audit */
/* movl PT_ECX(%esp), %ecx already set, a1: 3rd arg to audit */
pushl_cfi PT_ESI(%esp) /* a3: 5th arg */
pushl_cfi PT_EDX+4(%esp) /* a2: 4th arg */
pushl PT_ESI(%esp) /* a3: 5th arg */
pushl PT_EDX+4(%esp) /* a2: 4th arg */
call __audit_syscall_entry
popl_cfi %ecx /* get that remapped edx off the stack */
popl_cfi %ecx /* get that remapped esi off the stack */
popl %ecx /* get that remapped edx off the stack */
popl %ecx /* get that remapped esi off the stack */
movl PT_EAX(%esp),%eax /* reload syscall number */
jmp sysenter_do_call
......@@ -480,7 +406,6 @@ sysexit_audit:
jmp sysenter_exit
#endif
CFI_ENDPROC
.pushsection .fixup,"ax"
2: movl $0,PT_FS(%esp)
jmp 1b
......@@ -491,9 +416,8 @@ ENDPROC(ia32_sysenter_target)
# system call handler stub
ENTRY(system_call)
RING0_INT_FRAME # can't unwind into user space anyway
ASM_CLAC
pushl_cfi %eax # save orig_eax
pushl %eax # save orig_eax
SAVE_ALL
GET_THREAD_INFO(%ebp)
# system call tracing in operation / emulation
......@@ -527,7 +451,6 @@ restore_all_notrace:
movb PT_CS(%esp), %al
andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
CFI_REMEMBER_STATE
je ldt_ss # returning to user-space with LDT SS
#endif
restore_nocheck:
......@@ -543,7 +466,6 @@ ENTRY(iret_exc)
_ASM_EXTABLE(irq_return,iret_exc)
#ifdef CONFIG_X86_ESPFIX32
CFI_RESTORE_STATE
ldt_ss:
#ifdef CONFIG_PARAVIRT
/*
......@@ -577,22 +499,19 @@ ldt_ss:
shr $16, %edx
mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
pushl_cfi $__ESPFIX_SS
pushl_cfi %eax /* new kernel esp */
pushl $__ESPFIX_SS
pushl %eax /* new kernel esp */
/* Disable interrupts, but do not irqtrace this section: we
* will soon execute iret and the tracer was already set to
* the irqstate after the iret */
DISABLE_INTERRUPTS(CLBR_EAX)
lss (%esp), %esp /* switch to espfix segment */
CFI_ADJUST_CFA_OFFSET -8
jmp restore_nocheck
#endif
CFI_ENDPROC
ENDPROC(system_call)
# perform work that needs to be done immediately before resumption
ALIGN
RING0_PTREGS_FRAME # can't unwind into user space anyway
work_pending:
testb $_TIF_NEED_RESCHED, %cl
jz work_notifysig
......@@ -634,9 +553,9 @@ work_notifysig: # deal with pending signals and
#ifdef CONFIG_VM86
ALIGN
work_notifysig_v86:
pushl_cfi %ecx # save ti_flags for do_notify_resume
pushl %ecx # save ti_flags for do_notify_resume
call save_v86_state # %eax contains pt_regs pointer
popl_cfi %ecx
popl %ecx
movl %eax, %esp
jmp 1b
#endif
......@@ -666,9 +585,7 @@ syscall_exit_work:
call syscall_trace_leave
jmp resume_userspace
END(syscall_exit_work)
CFI_ENDPROC
RING0_INT_FRAME # can't unwind into user space anyway
syscall_fault:
ASM_CLAC
GET_THREAD_INFO(%ebp)
......@@ -685,7 +602,6 @@ sysenter_badsys:
movl $-ENOSYS,%eax
jmp sysenter_after_call
END(sysenter_badsys)
CFI_ENDPROC
.macro FIXUP_ESPFIX_STACK
/*
......@@ -701,10 +617,9 @@ END(sysenter_badsys)
mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
shl $16, %eax
addl %esp, %eax /* the adjusted stack pointer */
pushl_cfi $__KERNEL_DS
pushl_cfi %eax
pushl $__KERNEL_DS
pushl %eax
lss (%esp), %esp /* switch to the normal stack segment */
CFI_ADJUST_CFA_OFFSET -8
#endif
.endm
.macro UNWIND_ESPFIX_STACK
......@@ -728,13 +643,11 @@ END(sysenter_badsys)
*/
.align 8
ENTRY(irq_entries_start)
RING0_INT_FRAME
vector=FIRST_EXTERNAL_VECTOR
.rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
pushl_cfi $(~vector+0x80) /* Note: always in signed byte range */
pushl $(~vector+0x80) /* Note: always in signed byte range */
vector=vector+1
jmp common_interrupt
CFI_ADJUST_CFA_OFFSET -4
.align 8
.endr
END(irq_entries_start)
......@@ -753,19 +666,16 @@ common_interrupt:
call do_IRQ
jmp ret_from_intr
ENDPROC(common_interrupt)
CFI_ENDPROC
#define BUILD_INTERRUPT3(name, nr, fn) \
ENTRY(name) \
RING0_INT_FRAME; \
ASM_CLAC; \
pushl_cfi $~(nr); \
pushl $~(nr); \
SAVE_ALL; \
TRACE_IRQS_OFF \
movl %esp,%eax; \
call fn; \
jmp ret_from_intr; \
CFI_ENDPROC; \
ENDPROC(name)
......@@ -784,37 +694,31 @@ ENDPROC(name)
#include <asm/entry_arch.h>
ENTRY(coprocessor_error)
RING0_INT_FRAME
ASM_CLAC
pushl_cfi $0
pushl_cfi $do_coprocessor_error
pushl $0
pushl $do_coprocessor_error
jmp error_code
CFI_ENDPROC
END(coprocessor_error)
ENTRY(simd_coprocessor_error)
RING0_INT_FRAME
ASM_CLAC
pushl_cfi $0
pushl $0
#ifdef CONFIG_X86_INVD_BUG
/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
ALTERNATIVE "pushl_cfi $do_general_protection", \
ALTERNATIVE "pushl $do_general_protection", \
"pushl $do_simd_coprocessor_error", \
X86_FEATURE_XMM
#else
pushl_cfi $do_simd_coprocessor_error
pushl $do_simd_coprocessor_error
#endif
jmp error_code
CFI_ENDPROC
END(simd_coprocessor_error)
ENTRY(device_not_available)
RING0_INT_FRAME
ASM_CLAC
pushl_cfi $-1 # mark this as an int
pushl_cfi $do_device_not_available
pushl $-1 # mark this as an int
pushl $do_device_not_available
jmp error_code
CFI_ENDPROC
END(device_not_available)
#ifdef CONFIG_PARAVIRT
......@@ -830,115 +734,89 @@ END(native_irq_enable_sysexit)
#endif
ENTRY(overflow)
RING0_INT_FRAME
ASM_CLAC
pushl_cfi $0
pushl_cfi $do_overflow
pushl $0
pushl $do_overflow
jmp error_code
CFI_ENDPROC
END(overflow)
ENTRY(bounds)
RING0_INT_FRAME
ASM_CLAC
pushl_cfi $0
pushl_cfi $do_bounds
pushl $0
pushl $do_bounds
jmp error_code
CFI_ENDPROC
END(bounds)
ENTRY(invalid_op)
RING0_INT_FRAME
ASM_CLAC
pushl_cfi $0
pushl_cfi $do_invalid_op
pushl $0
pushl $do_invalid_op
jmp error_code
CFI_ENDPROC
END(invalid_op)
ENTRY(coprocessor_segment_overrun)
RING0_INT_FRAME
ASM_CLAC
pushl_cfi $0
pushl_cfi $do_coprocessor_segment_overrun
pushl $0
pushl $do_coprocessor_segment_overrun
jmp error_code
CFI_ENDPROC
END(coprocessor_segment_overrun)
ENTRY(invalid_TSS)
RING0_EC_FRAME
ASM_CLAC
pushl_cfi $do_invalid_TSS
pushl $do_invalid_TSS
jmp error_code
CFI_ENDPROC
END(invalid_TSS)
ENTRY(segment_not_present)
RING0_EC_FRAME
ASM_CLAC
pushl_cfi $do_segment_not_present
pushl $do_segment_not_present
jmp error_code
CFI_ENDPROC
END(segment_not_present)
ENTRY(stack_segment)
RING0_EC_FRAME
ASM_CLAC
pushl_cfi $do_stack_segment
pushl $do_stack_segment
jmp error_code
CFI_ENDPROC
END(stack_segment)
ENTRY(alignment_check)
RING0_EC_FRAME
ASM_CLAC
pushl_cfi $do_alignment_check
pushl $do_alignment_check
jmp error_code
CFI_ENDPROC
END(alignment_check)
ENTRY(divide_error)
RING0_INT_FRAME
ASM_CLAC
pushl_cfi $0 # no error code
pushl_cfi $do_divide_error
pushl $0 # no error code
pushl $do_divide_error
jmp error_code
CFI_ENDPROC
END(divide_error)
#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
RING0_INT_FRAME
ASM_CLAC
pushl_cfi $0
pushl_cfi machine_check_vector
pushl $0
pushl machine_check_vector
jmp error_code
CFI_ENDPROC
END(machine_check)
#endif
ENTRY(spurious_interrupt_bug)
RING0_INT_FRAME
ASM_CLAC
pushl_cfi $0
pushl_cfi $do_spurious_interrupt_bug
pushl $0
pushl $do_spurious_interrupt_bug
jmp error_code
CFI_ENDPROC
END(spurious_interrupt_bug)
#ifdef CONFIG_XEN
/* Xen doesn't set %esp to be precisely what the normal sysenter
entrypoint expects, so fix it up before using the normal path. */
ENTRY(xen_sysenter_target)
RING0_INT_FRAME
addl $5*4, %esp /* remove xen-provided frame */
CFI_ADJUST_CFA_OFFSET -5*4
jmp sysenter_past_esp
CFI_ENDPROC
ENTRY(xen_hypervisor_callback)
CFI_STARTPROC
pushl_cfi $-1 /* orig_ax = -1 => not a system call */
pushl $-1 /* orig_ax = -1 => not a system call */
SAVE_ALL
TRACE_IRQS_OFF
......@@ -962,7 +840,6 @@ ENTRY(xen_do_upcall)
call xen_maybe_preempt_hcall
#endif
jmp ret_from_intr
CFI_ENDPROC
ENDPROC(xen_hypervisor_callback)
# Hypervisor uses this for application faults while it executes.
......@@ -976,8 +853,7 @@ ENDPROC(xen_hypervisor_callback)
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by maintaining a status value in EAX.
ENTRY(xen_failsafe_callback)
CFI_STARTPROC
pushl_cfi %eax
pushl %eax
movl $1,%eax
1: mov 4(%esp),%ds
2: mov 8(%esp),%es
......@@ -986,15 +862,13 @@ ENTRY(xen_failsafe_callback)
/* EAX == 0 => Category 1 (Bad segment)
EAX != 0 => Category 2 (Bad IRET) */
testl %eax,%eax
popl_cfi %eax
popl %eax
lea 16(%esp),%esp
CFI_ADJUST_CFA_OFFSET -16
jz 5f
jmp iret_exc
5: pushl_cfi $-1 /* orig_ax = -1 => not a system call */
5: pushl $-1 /* orig_ax = -1 => not a system call */
SAVE_ALL
jmp ret_from_exception
CFI_ENDPROC
.section .fixup,"ax"
6: xorl %eax,%eax
......@@ -1195,34 +1069,28 @@ return_to_handler:
#ifdef CONFIG_TRACING
ENTRY(trace_page_fault)
RING0_EC_FRAME
ASM_CLAC
pushl_cfi $trace_do_page_fault
pushl $trace_do_page_fault
jmp error_code
CFI_ENDPROC
END(trace_page_fault)
#endif
ENTRY(page_fault)
RING0_EC_FRAME
ASM_CLAC
pushl_cfi $do_page_fault
pushl $do_page_fault
ALIGN
error_code:
/* the function address is in %gs's slot on the stack */
pushl_cfi %fs
/*CFI_REL_OFFSET fs, 0*/
pushl_cfi %es
/*CFI_REL_OFFSET es, 0*/
pushl_cfi %ds
/*CFI_REL_OFFSET ds, 0*/
pushl_cfi_reg eax
pushl_cfi_reg ebp
pushl_cfi_reg edi
pushl_cfi_reg esi
pushl_cfi_reg edx
pushl_cfi_reg ecx
pushl_cfi_reg ebx
pushl %fs
pushl %es
pushl %ds
pushl %eax
pushl %ebp
pushl %edi
pushl %esi
pushl %edx
pushl %ecx
pushl %ebx
cld
movl $(__KERNEL_PERCPU), %ecx
movl %ecx, %fs
......@@ -1240,7 +1108,6 @@ error_code:
movl %esp,%eax # pt_regs pointer
call *%edi
jmp ret_from_exception
CFI_ENDPROC
END(page_fault)
/*
......@@ -1261,29 +1128,24 @@ END(page_fault)
jne \ok
\label:
movl TSS_sysenter_sp0 + \offset(%esp), %esp
CFI_DEF_CFA esp, 0
CFI_UNDEFINED eip
pushfl_cfi
pushl_cfi $__KERNEL_CS
pushl_cfi $sysenter_past_esp
CFI_REL_OFFSET eip, 0
pushfl
pushl $__KERNEL_CS
pushl $sysenter_past_esp
.endm
ENTRY(debug)
RING0_INT_FRAME
ASM_CLAC
cmpl $ia32_sysenter_target,(%esp)
jne debug_stack_correct
FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
debug_stack_correct:
pushl_cfi $-1 # mark this as an int
pushl $-1 # mark this as an int
SAVE_ALL
TRACE_IRQS_OFF
xorl %edx,%edx # error code 0
movl %esp,%eax # pt_regs pointer
call do_debug
jmp ret_from_exception
CFI_ENDPROC
END(debug)
/*
......@@ -1295,45 +1157,40 @@ END(debug)
* fault happened on the sysenter path.
*/
ENTRY(nmi)
RING0_INT_FRAME
ASM_CLAC
#ifdef CONFIG_X86_ESPFIX32
pushl_cfi %eax
pushl %eax
movl %ss, %eax
cmpw $__ESPFIX_SS, %ax
popl_cfi %eax
popl %eax
je nmi_espfix_stack
#endif
cmpl $ia32_sysenter_target,(%esp)
je nmi_stack_fixup
pushl_cfi %eax
pushl %eax
movl %esp,%eax
/* Do not access memory above the end of our stack page,
* it might not exist.
*/
andl $(THREAD_SIZE-1),%eax
cmpl $(THREAD_SIZE-20),%eax
popl_cfi %eax
popl %eax
jae nmi_stack_correct
cmpl $ia32_sysenter_target,12(%esp)
je nmi_debug_stack_check
nmi_stack_correct:
/* We have a RING0_INT_FRAME here */
pushl_cfi %eax
pushl %eax
SAVE_ALL
xorl %edx,%edx # zero error code
movl %esp,%eax # pt_regs pointer
call do_nmi
jmp restore_all_notrace
CFI_ENDPROC
nmi_stack_fixup:
RING0_INT_FRAME
FIX_STACK 12, nmi_stack_correct, 1
jmp nmi_stack_correct
nmi_debug_stack_check:
/* We have a RING0_INT_FRAME here */
cmpw $__KERNEL_CS,16(%esp)
jne nmi_stack_correct
cmpl $debug,(%esp)
......@@ -1345,57 +1202,48 @@ nmi_debug_stack_check:
#ifdef CONFIG_X86_ESPFIX32
nmi_espfix_stack:
/* We have a RING0_INT_FRAME here.
*
/*
* create the pointer to lss back
*/
pushl_cfi %ss
pushl_cfi %esp
pushl %ss
pushl %esp
addl $4, (%esp)
/* copy the iret frame of 12 bytes */
.rept 3
pushl_cfi 16(%esp)
pushl 16(%esp)
.endr
pushl_cfi %eax
pushl %eax
SAVE_ALL
FIXUP_ESPFIX_STACK # %eax == %esp
xorl %edx,%edx # zero error code
call do_nmi
RESTORE_REGS
lss 12+4(%esp), %esp # back to espfix stack
CFI_ADJUST_CFA_OFFSET -24
jmp irq_return
#endif
CFI_ENDPROC
END(nmi)
ENTRY(int3)
RING0_INT_FRAME
ASM_CLAC
pushl_cfi $-1 # mark this as an int
pushl $-1 # mark this as an int
SAVE_ALL
TRACE_IRQS_OFF
xorl %edx,%edx # zero error code
movl %esp,%eax # pt_regs pointer
call do_int3
jmp ret_from_exception
CFI_ENDPROC
END(int3)
ENTRY(general_protection)
RING0_EC_FRAME
pushl_cfi $do_general_protection
pushl $do_general_protection
jmp error_code
CFI_ENDPROC
END(general_protection)
#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
RING0_EC_FRAME
ASM_CLAC
pushl_cfi $do_async_page_fault
pushl $do_async_page_fault
jmp error_code
CFI_ENDPROC
END(async_page_fault)
#endif
......@@ -19,8 +19,6 @@
* at the top of the kernel process stack.
*
* Some macro usage:
* - CFI macros are used to generate dwarf2 unwind information for better
* backtraces. They don't change any code.
* - ENTRY/END Define functions in the symbol table.
* - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
* - idtentry - Define exception entry points.
......@@ -30,7 +28,6 @@
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
......@@ -112,61 +109,6 @@ ENDPROC(native_usergs_sysret64)
# define TRACE_IRQS_IRETQ_DEBUG TRACE_IRQS_IRETQ
#endif
/*
* empty frame
*/
.macro EMPTY_FRAME start=1 offset=0
.if \start
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA rsp,8+\offset
.else
CFI_DEF_CFA_OFFSET 8+\offset
.endif
.endm
/*
* initial frame state for interrupts (and exceptions without error code)
*/
.macro INTR_FRAME start=1 offset=0
EMPTY_FRAME \start, 5*8+\offset
/*CFI_REL_OFFSET ss, 4*8+\offset*/
CFI_REL_OFFSET rsp, 3*8+\offset
/*CFI_REL_OFFSET rflags, 2*8+\offset*/
/*CFI_REL_OFFSET cs, 1*8+\offset*/
CFI_REL_OFFSET rip, 0*8+\offset
.endm
/*
* initial frame state for exceptions with error code (and interrupts
* with vector already pushed)
*/
.macro XCPT_FRAME start=1 offset=0
INTR_FRAME \start, 1*8+\offset
.endm
/*
* frame that enables passing a complete pt_regs to a C function.
*/
.macro DEFAULT_FRAME start=1 offset=0
XCPT_FRAME \start, ORIG_RAX+\offset
CFI_REL_OFFSET rdi, RDI+\offset
CFI_REL_OFFSET rsi, RSI+\offset
CFI_REL_OFFSET rdx, RDX+\offset
CFI_REL_OFFSET rcx, RCX+\offset
CFI_REL_OFFSET rax, RAX+\offset
CFI_REL_OFFSET r8, R8+\offset
CFI_REL_OFFSET r9, R9+\offset
CFI_REL_OFFSET r10, R10+\offset
CFI_REL_OFFSET r11, R11+\offset
CFI_REL_OFFSET rbx, RBX+\offset
CFI_REL_OFFSET rbp, RBP+\offset
CFI_REL_OFFSET r12, R12+\offset
CFI_REL_OFFSET r13, R13+\offset
CFI_REL_OFFSET r14, R14+\offset
CFI_REL_OFFSET r15, R15+\offset
.endm
/*
* 64bit SYSCALL instruction entry. Up to 6 arguments in registers.
*
......@@ -196,12 +138,6 @@ ENDPROC(native_usergs_sysret64)
*/
ENTRY(system_call)
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA rsp,0
CFI_REGISTER rip,rcx
/*CFI_REGISTER rflags,r11*/
/*
* Interrupts are off on entry.
* We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
......@@ -219,8 +155,8 @@ GLOBAL(system_call_after_swapgs)
movq PER_CPU_VAR(cpu_current_top_of_stack),%rsp
/* Construct struct pt_regs on stack */
pushq_cfi $__USER_DS /* pt_regs->ss */
pushq_cfi PER_CPU_VAR(rsp_scratch) /* pt_regs->sp */
pushq $__USER_DS /* pt_regs->ss */
pushq PER_CPU_VAR(rsp_scratch) /* pt_regs->sp */
/*
* Re-enable interrupts.
* We use 'rsp_scratch' as a scratch space, hence irq-off block above
......@@ -229,22 +165,20 @@ GLOBAL(system_call_after_swapgs)
* with using rsp_scratch:
*/
ENABLE_INTERRUPTS(CLBR_NONE)
pushq_cfi %r11 /* pt_regs->flags */
pushq_cfi $__USER_CS /* pt_regs->cs */
pushq_cfi %rcx /* pt_regs->ip */
CFI_REL_OFFSET rip,0
pushq_cfi_reg rax /* pt_regs->orig_ax */
pushq_cfi_reg rdi /* pt_regs->di */
pushq_cfi_reg rsi /* pt_regs->si */
pushq_cfi_reg rdx /* pt_regs->dx */
pushq_cfi_reg rcx /* pt_regs->cx */
pushq_cfi $-ENOSYS /* pt_regs->ax */
pushq_cfi_reg r8 /* pt_regs->r8 */
pushq_cfi_reg r9 /* pt_regs->r9 */
pushq_cfi_reg r10 /* pt_regs->r10 */
pushq_cfi_reg r11 /* pt_regs->r11 */
pushq %r11 /* pt_regs->flags */
pushq $__USER_CS /* pt_regs->cs */
pushq %rcx /* pt_regs->ip */
pushq %rax /* pt_regs->orig_ax */
pushq %rdi /* pt_regs->di */
pushq %rsi /* pt_regs->si */
pushq %rdx /* pt_regs->dx */
pushq %rcx /* pt_regs->cx */
pushq $-ENOSYS /* pt_regs->ax */
pushq %r8 /* pt_regs->r8 */
pushq %r9 /* pt_regs->r9 */
pushq %r10 /* pt_regs->r10 */
pushq %r11 /* pt_regs->r11 */
sub $(6*8),%rsp /* pt_regs->bp,bx,r12-15 not saved */
CFI_ADJUST_CFA_OFFSET 6*8
testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
jnz tracesys
......@@ -282,13 +216,9 @@ system_call_fastpath:
testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
jnz int_ret_from_sys_call_irqs_off /* Go to the slow path */
CFI_REMEMBER_STATE
RESTORE_C_REGS_EXCEPT_RCX_R11
movq RIP(%rsp),%rcx
CFI_REGISTER rip,rcx
movq EFLAGS(%rsp),%r11
/*CFI_REGISTER rflags,r11*/
movq RSP(%rsp),%rsp
/*
* 64bit SYSRET restores rip from rcx,
......@@ -307,8 +237,6 @@ system_call_fastpath:
*/
USERGS_SYSRET64
CFI_RESTORE_STATE
/* Do syscall entry tracing */
tracesys:
movq %rsp, %rdi
......@@ -374,9 +302,9 @@ int_careful:
jnc int_very_careful
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
pushq_cfi %rdi
pushq %rdi
SCHEDULE_USER
popq_cfi %rdi
popq %rdi
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
jmp int_with_check
......@@ -389,10 +317,10 @@ int_very_careful:
/* Check for syscall exit trace */
testl $_TIF_WORK_SYSCALL_EXIT,%edx
jz int_signal
pushq_cfi %rdi
pushq %rdi
leaq 8(%rsp),%rdi # &ptregs -> arg1
call syscall_trace_leave
popq_cfi %rdi
popq %rdi
andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
jmp int_restore_rest
......@@ -475,27 +403,21 @@ syscall_return:
* perf profiles. Nothing jumps here.
*/
syscall_return_via_sysret:
CFI_REMEMBER_STATE
/* rcx and r11 are already restored (see code above) */
RESTORE_C_REGS_EXCEPT_RCX_R11
movq RSP(%rsp),%rsp
USERGS_SYSRET64
CFI_RESTORE_STATE
opportunistic_sysret_failed:
SWAPGS
jmp restore_c_regs_and_iret
CFI_ENDPROC
END(system_call)
.macro FORK_LIKE func
ENTRY(stub_\func)
CFI_STARTPROC
DEFAULT_FRAME 0, 8 /* offset 8: return address */
SAVE_EXTRA_REGS 8
jmp sys_\func
CFI_ENDPROC
END(stub_\func)
.endm
......@@ -504,8 +426,6 @@ END(stub_\func)
FORK_LIKE vfork
ENTRY(stub_execve)
CFI_STARTPROC
DEFAULT_FRAME 0, 8
call sys_execve
return_from_execve:
testl %eax, %eax
......@@ -515,11 +435,9 @@ return_from_execve:
1:
/* must use IRET code path (pt_regs->cs may have changed) */
addq $8, %rsp
CFI_ADJUST_CFA_OFFSET -8
ZERO_EXTRA_REGS
movq %rax,RAX(%rsp)
jmp int_ret_from_sys_call
CFI_ENDPROC
END(stub_execve)
/*
* Remaining execve stubs are only 7 bytes long.
......@@ -527,32 +445,23 @@ END(stub_execve)
*/
.align 8
GLOBAL(stub_execveat)
CFI_STARTPROC
DEFAULT_FRAME 0, 8
call sys_execveat
jmp return_from_execve
CFI_ENDPROC
END(stub_execveat)
#if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
.align 8
GLOBAL(stub_x32_execve)
GLOBAL(stub32_execve)
CFI_STARTPROC
DEFAULT_FRAME 0, 8
call compat_sys_execve
jmp return_from_execve
CFI_ENDPROC
END(stub32_execve)
END(stub_x32_execve)
.align 8
GLOBAL(stub_x32_execveat)
GLOBAL(stub32_execveat)
CFI_STARTPROC
DEFAULT_FRAME 0, 8
call compat_sys_execveat
jmp return_from_execve
CFI_ENDPROC
END(stub32_execveat)
END(stub_x32_execveat)
#endif
......@@ -562,8 +471,6 @@ END(stub_x32_execveat)
* This cannot be done with SYSRET, so use the IRET return path instead.
*/
ENTRY(stub_rt_sigreturn)
CFI_STARTPROC
DEFAULT_FRAME 0, 8
/*
* SAVE_EXTRA_REGS result is not normally needed:
* sigreturn overwrites all pt_regs->GPREGS.
......@@ -575,21 +482,16 @@ ENTRY(stub_rt_sigreturn)
call sys_rt_sigreturn
return_from_stub:
addq $8, %rsp
CFI_ADJUST_CFA_OFFSET -8
RESTORE_EXTRA_REGS
movq %rax,RAX(%rsp)
jmp int_ret_from_sys_call
CFI_ENDPROC
END(stub_rt_sigreturn)
#ifdef CONFIG_X86_X32_ABI
ENTRY(stub_x32_rt_sigreturn)
CFI_STARTPROC
DEFAULT_FRAME 0, 8
SAVE_EXTRA_REGS 8
call sys32_x32_rt_sigreturn
jmp return_from_stub
CFI_ENDPROC
END(stub_x32_rt_sigreturn)
#endif
......@@ -599,12 +501,11 @@ END(stub_x32_rt_sigreturn)
* rdi: prev task we switched from
*/
ENTRY(ret_from_fork)
DEFAULT_FRAME
LOCK ; btr $TIF_FORK,TI_flags(%r8)
pushq_cfi $0x0002
popfq_cfi # reset kernel eflags
pushq $0x0002
popfq # reset kernel eflags
call schedule_tail # rdi: 'prev' task parameter
......@@ -628,7 +529,6 @@ ENTRY(ret_from_fork)
movl $0, RAX(%rsp)
RESTORE_EXTRA_REGS
jmp int_ret_from_sys_call
CFI_ENDPROC
END(ret_from_fork)
/*
......@@ -637,16 +537,13 @@ END(ret_from_fork)
*/
.align 8
ENTRY(irq_entries_start)
INTR_FRAME
vector=FIRST_EXTERNAL_VECTOR
.rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
pushq_cfi $(~vector+0x80) /* Note: always in signed byte range */
pushq $(~vector+0x80) /* Note: always in signed byte range */
vector=vector+1
jmp common_interrupt
CFI_ADJUST_CFA_OFFSET -8
.align 8
.endr
CFI_ENDPROC
END(irq_entries_start)
/*
......@@ -688,17 +585,7 @@ END(irq_entries_start)
movq %rsp, %rsi
incl PER_CPU_VAR(irq_count)
cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
CFI_DEF_CFA_REGISTER rsi
pushq %rsi
/*
* For debugger:
* "CFA (Current Frame Address) is the value on stack + offset"
*/
CFI_ESCAPE 0x0f /* DW_CFA_def_cfa_expression */, 6, \
0x77 /* DW_OP_breg7 (rsp) */, 0, \
0x06 /* DW_OP_deref */, \
0x08 /* DW_OP_const1u */, SIZEOF_PTREGS-RBP, \
0x22 /* DW_OP_plus */
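/*
 * Decoded, the escape above is the DWARF expression
 *
 *	CFA = *(%rsp) + (SIZEOF_PTREGS - RBP)
 *
 * i.e. dereference the previous stack pointer just pushed onto the
 * irq stack, then add the distance from pt_regs->bp to the top of
 * the pt_regs frame (a reader's decoding sketch, matching the
 * CFI_DEF_CFA rsi,SIZEOF_PTREGS-RBP that follows the pop below).
 */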
/* We entered an interrupt context - irqs are off: */
TRACE_IRQS_OFF
......@@ -711,7 +598,6 @@ END(irq_entries_start)
*/
.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
XCPT_FRAME
ASM_CLAC
addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */
interrupt do_IRQ
......@@ -723,11 +609,8 @@ ret_from_intr:
/* Restore saved previous stack */
popq %rsi
CFI_DEF_CFA rsi,SIZEOF_PTREGS-RBP /* reg/off reset after def_cfa_expr */
/* return code expects complete pt_regs - adjust rsp accordingly: */
leaq -RBP(%rsi),%rsp
CFI_DEF_CFA_REGISTER rsp
CFI_ADJUST_CFA_OFFSET RBP
testb $3, CS(%rsp)
jz retint_kernel
......@@ -743,7 +626,6 @@ retint_check:
LOCKDEP_SYS_EXIT_IRQ
movl TI_flags(%rcx),%edx
andl %edi,%edx
CFI_REMEMBER_STATE
jnz retint_careful
retint_swapgs: /* return to user-space */
......@@ -807,8 +689,8 @@ native_irq_return_iret:
#ifdef CONFIG_X86_ESPFIX64
native_irq_return_ldt:
pushq_cfi %rax
pushq_cfi %rdi
pushq %rax
pushq %rdi
SWAPGS
movq PER_CPU_VAR(espfix_waddr),%rdi
movq %rax,(0*8)(%rdi) /* RAX */
......@@ -823,24 +705,23 @@ native_irq_return_ldt:
movq (5*8)(%rsp),%rax /* RSP */
movq %rax,(4*8)(%rdi)
andl $0xffff0000,%eax
popq_cfi %rdi
popq %rdi
orq PER_CPU_VAR(espfix_stack),%rax
SWAPGS
movq %rax,%rsp
popq_cfi %rax
popq %rax
jmp native_irq_return_iret
#endif
/* edi: workmask, edx: work */
retint_careful:
CFI_RESTORE_STATE
bt $TIF_NEED_RESCHED,%edx
jnc retint_signal
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
pushq_cfi %rdi
pushq %rdi
SCHEDULE_USER
popq_cfi %rdi
popq %rdi
GET_THREAD_INFO(%rcx)
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
......@@ -862,7 +743,6 @@ retint_signal:
GET_THREAD_INFO(%rcx)
jmp retint_with_reschedule
CFI_ENDPROC
END(common_interrupt)
/*
......@@ -870,13 +750,11 @@ END(common_interrupt)
*/
.macro apicinterrupt3 num sym do_sym
ENTRY(\sym)
INTR_FRAME
ASM_CLAC
pushq_cfi $~(\num)
pushq $~(\num)
.Lcommon_\sym:
interrupt \do_sym
jmp ret_from_intr
CFI_ENDPROC
END(\sym)
.endm
......@@ -959,24 +837,17 @@ ENTRY(\sym)
.error "using shift_ist requires paranoid=1"
.endif
.if \has_error_code
XCPT_FRAME
.else
INTR_FRAME
.endif
ASM_CLAC
PARAVIRT_ADJUST_EXCEPTION_FRAME
.ifeq \has_error_code
pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
pushq $-1 /* ORIG_RAX: no syscall to restart */
.endif
ALLOC_PT_GPREGS_ON_STACK
.if \paranoid
.if \paranoid == 1
CFI_REMEMBER_STATE
testb $3, CS(%rsp) /* If coming from userspace, switch */
jnz 1f /* stacks. */
.endif
......@@ -986,8 +857,6 @@ ENTRY(\sym)
.endif
/* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */
DEFAULT_FRAME 0
.if \paranoid
.if \shift_ist != -1
TRACE_IRQS_OFF_DEBUG /* reload IDT in case of recursion */
......@@ -1023,7 +892,6 @@ ENTRY(\sym)
.endif
.if \paranoid == 1
CFI_RESTORE_STATE
/*
* Paranoid entry from userspace. Switch stacks and treat it
* as a normal entry. This means that paranoid handlers
......@@ -1032,7 +900,6 @@ ENTRY(\sym)
1:
call error_entry
DEFAULT_FRAME 0
movq %rsp,%rdi /* pt_regs pointer */
call sync_regs
......@@ -1051,8 +918,6 @@ ENTRY(\sym)
jmp error_exit /* %ebx: no swapgs flag */
.endif
CFI_ENDPROC
END(\sym)
.endm
......@@ -1085,17 +950,15 @@ idtentry simd_coprocessor_error do_simd_coprocessor_error has_error_code=0
/* Reload gs selector with exception handling */
/* edi: new selector */
ENTRY(native_load_gs_index)
CFI_STARTPROC
pushfq_cfi
pushfq
DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
SWAPGS
gs_change:
movl %edi,%gs
2: mfence /* workaround */
SWAPGS
popfq_cfi
popfq
ret
CFI_ENDPROC
END(native_load_gs_index)
_ASM_EXTABLE(gs_change,bad_gs)
......@@ -1110,22 +973,15 @@ bad_gs:
/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(do_softirq_own_stack)
CFI_STARTPROC
pushq_cfi %rbp
CFI_REL_OFFSET rbp,0
pushq %rbp
mov %rsp,%rbp
CFI_DEF_CFA_REGISTER rbp
incl PER_CPU_VAR(irq_count)
cmove PER_CPU_VAR(irq_stack_ptr),%rsp
push %rbp # backlink for old unwinder
call __do_softirq
leaveq
CFI_RESTORE rbp
CFI_DEF_CFA_REGISTER rsp
CFI_ADJUST_CFA_OFFSET -8
decl PER_CPU_VAR(irq_count)
ret
CFI_ENDPROC
END(do_softirq_own_stack)
#ifdef CONFIG_XEN
......@@ -1145,28 +1001,22 @@ idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
* activation and restart the handler using the previous one.
*/
ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
CFI_STARTPROC
/*
* Since we don't modify %rdi, evtchn_do_upcall(struct *pt_regs) will
* see the correct pointer to the pt_regs
*/
movq %rdi, %rsp # we don't return, adjust the stack frame
CFI_ENDPROC
DEFAULT_FRAME
11: incl PER_CPU_VAR(irq_count)
movq %rsp,%rbp
CFI_DEF_CFA_REGISTER rbp
cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
pushq %rbp # backlink for old unwinder
call xen_evtchn_do_upcall
popq %rsp
CFI_DEF_CFA_REGISTER rsp
decl PER_CPU_VAR(irq_count)
#ifndef CONFIG_PREEMPT
call xen_maybe_preempt_hcall
#endif
jmp error_exit
CFI_ENDPROC
END(xen_do_hypervisor_callback)
/*
......@@ -1183,16 +1033,8 @@ END(xen_do_hypervisor_callback)
* with its current contents: any discrepancy means we in category 1.
*/
ENTRY(xen_failsafe_callback)
INTR_FRAME 1 (6*8)
/*CFI_REL_OFFSET gs,GS*/
/*CFI_REL_OFFSET fs,FS*/
/*CFI_REL_OFFSET es,ES*/
/*CFI_REL_OFFSET ds,DS*/
CFI_REL_OFFSET r11,8
CFI_REL_OFFSET rcx,0
movl %ds,%ecx
cmpw %cx,0x10(%rsp)
CFI_REMEMBER_STATE
jne 1f
movl %es,%ecx
cmpw %cx,0x18(%rsp)
......@@ -1205,29 +1047,21 @@ ENTRY(xen_failsafe_callback)
jne 1f
/* All segments match their saved values => Category 2 (Bad IRET). */
movq (%rsp),%rcx
CFI_RESTORE rcx
movq 8(%rsp),%r11
CFI_RESTORE r11
addq $0x30,%rsp
CFI_ADJUST_CFA_OFFSET -0x30
pushq_cfi $0 /* RIP */
pushq_cfi %r11
pushq_cfi %rcx
pushq $0 /* RIP */
pushq %r11
pushq %rcx
jmp general_protection
CFI_RESTORE_STATE
1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
movq (%rsp),%rcx
CFI_RESTORE rcx
movq 8(%rsp),%r11
CFI_RESTORE r11
addq $0x30,%rsp
CFI_ADJUST_CFA_OFFSET -0x30
pushq_cfi $-1 /* orig_ax = -1 => not a system call */
pushq $-1 /* orig_ax = -1 => not a system call */
ALLOC_PT_GPREGS_ON_STACK
SAVE_C_REGS
SAVE_EXTRA_REGS
jmp error_exit
CFI_ENDPROC
END(xen_failsafe_callback)
apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
......@@ -1263,7 +1097,6 @@ idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vector(
* Return: ebx=0: need swapgs on exit, ebx=1: otherwise
*/
ENTRY(paranoid_entry)
XCPT_FRAME 1 15*8
cld
SAVE_C_REGS 8
SAVE_EXTRA_REGS 8
......@@ -1275,7 +1108,6 @@ ENTRY(paranoid_entry)
SWAPGS
xorl %ebx,%ebx
1: ret
CFI_ENDPROC
END(paranoid_entry)
/*
......@@ -1290,7 +1122,6 @@ END(paranoid_entry)
*/
/* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */
ENTRY(paranoid_exit)
DEFAULT_FRAME
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF_DEBUG
testl %ebx,%ebx /* swapgs needed? */
......@@ -1305,7 +1136,6 @@ paranoid_exit_restore:
RESTORE_C_REGS
REMOVE_PT_GPREGS_FROM_STACK 8
INTERRUPT_RETURN
CFI_ENDPROC
END(paranoid_exit)
/*
......@@ -1313,7 +1143,6 @@ END(paranoid_exit)
* Return: ebx=0: need swapgs on exit, ebx=1: otherwise
*/
ENTRY(error_entry)
XCPT_FRAME 1 15*8
cld
SAVE_C_REGS 8
SAVE_EXTRA_REGS 8
......@@ -1333,7 +1162,6 @@ error_sti:
* for these here too.
*/
error_kernelspace:
CFI_REL_OFFSET rcx, RCX+8
incl %ebx
leaq native_irq_return_iret(%rip),%rcx
cmpq %rcx,RIP+8(%rsp)
......@@ -1357,13 +1185,11 @@ error_bad_iret:
mov %rax,%rsp
decl %ebx /* Return to usergs */
jmp error_sti
CFI_ENDPROC
END(error_entry)
/* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */
ENTRY(error_exit)
DEFAULT_FRAME
movl %ebx,%eax
RESTORE_EXTRA_REGS
DISABLE_INTERRUPTS(CLBR_NONE)
......@@ -1377,12 +1203,10 @@ ENTRY(error_exit)
andl %edi,%edx
jnz retint_careful
jmp retint_swapgs
CFI_ENDPROC
END(error_exit)
/* Runs on exception stack */
ENTRY(nmi)
INTR_FRAME
PARAVIRT_ADJUST_EXCEPTION_FRAME
/*
* We allow breakpoints in NMIs. If a breakpoint occurs, then
......@@ -1417,8 +1241,7 @@ ENTRY(nmi)
*/
/* Use %rdx as our temp variable throughout */
pushq_cfi %rdx
CFI_REL_OFFSET rdx, 0
pushq %rdx
/*
* If %cs was not the kernel segment, then the NMI triggered in user
......@@ -1452,8 +1275,6 @@ ENTRY(nmi)
jb first_nmi
/* Ah, it is within the NMI stack, treat it as nested */
CFI_REMEMBER_STATE
nested_nmi:
/*
* Do nothing if we interrupted the fixup in repeat_nmi.
......@@ -1471,26 +1292,22 @@ nested_nmi:
/* Set up the interrupted NMIs stack to jump to repeat_nmi */
leaq -1*8(%rsp), %rdx
movq %rdx, %rsp
CFI_ADJUST_CFA_OFFSET 1*8
leaq -10*8(%rsp), %rdx
pushq_cfi $__KERNEL_DS
pushq_cfi %rdx
pushfq_cfi
pushq_cfi $__KERNEL_CS
pushq_cfi $repeat_nmi
pushq $__KERNEL_DS
pushq %rdx
pushfq
pushq $__KERNEL_CS
pushq $repeat_nmi
/* Put stack back */
addq $(6*8), %rsp
CFI_ADJUST_CFA_OFFSET -6*8
nested_nmi_out:
popq_cfi %rdx
CFI_RESTORE rdx
popq %rdx
/* No need to check faults here */
INTERRUPT_RETURN
CFI_RESTORE_STATE
first_nmi:
/*
* Because nested NMIs will use the pushed location that we
......@@ -1529,22 +1346,19 @@ first_nmi:
*/
/* Do not pop rdx, nested NMIs will corrupt that part of the stack */
movq (%rsp), %rdx
CFI_RESTORE rdx
/* Set the NMI executing variable on the stack. */
pushq_cfi $1
pushq $1
/*
* Leave room for the "copied" frame
*/
subq $(5*8), %rsp
CFI_ADJUST_CFA_OFFSET 5*8
/* Copy the stack frame to the Saved frame */
.rept 5
pushq_cfi 11*8(%rsp)
pushq 11*8(%rsp)
.endr
CFI_DEF_CFA_OFFSET 5*8
/* Everything up to here is safe from nested NMIs */
......@@ -1567,12 +1381,10 @@ repeat_nmi:
/* Make another copy, this one may be modified by nested NMIs */
addq $(10*8), %rsp
CFI_ADJUST_CFA_OFFSET -10*8
.rept 5
pushq_cfi -6*8(%rsp)
pushq -6*8(%rsp)
.endr
subq $(5*8), %rsp
CFI_DEF_CFA_OFFSET 5*8
end_repeat_nmi:
/*
......@@ -1580,7 +1392,7 @@ end_repeat_nmi:
* NMI if the first NMI took an exception and reset our iret stack
* so that we repeat another NMI.
*/
pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
pushq $-1 /* ORIG_RAX: no syscall to restart */
ALLOC_PT_GPREGS_ON_STACK
/*
......@@ -1591,7 +1403,6 @@ end_repeat_nmi:
* exceptions might do.
*/
call paranoid_entry
DEFAULT_FRAME 0
/*
* Save off the CR2 register. If we take a page fault in the NMI then
......@@ -1628,13 +1439,10 @@ nmi_restore:
/* Clear the NMI executing stack variable */
movq $0, 5*8(%rsp)
jmp irq_return
CFI_ENDPROC
END(nmi)
ENTRY(ignore_sysret)
CFI_STARTPROC
mov $-ENOSYS,%eax
sysret
CFI_ENDPROC
END(ignore_sysret)
......@@ -11,26 +11,23 @@
#include <linux/linkage.h>
#include <asm/alternative-asm.h>
#include <asm/dwarf2.h>
/* if you want SMP support, implement these with real spinlocks */
.macro LOCK reg
pushfl_cfi
pushfl
cli
.endm
.macro UNLOCK reg
popfl_cfi
popfl
.endm
#define BEGIN(op) \
.macro endp; \
CFI_ENDPROC; \
ENDPROC(atomic64_##op##_386); \
.purgem endp; \
.endm; \
ENTRY(atomic64_##op##_386); \
CFI_STARTPROC; \
LOCK v;
#define ENDP endp
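
/*
 * Hand-expanded sketch of the BEGIN/ENDP idiom above, for a
 * hypothetical 'add' op (v is assumed to be the register holding the
 * atomic64_t pointer; its definition is outside this excerpt):
 *
 *	BEGIN(add)			# defines endp, then
 *					# ENTRY(atomic64_add_386); LOCK v
 *		addl	%eax,  (v)	# low word
 *		adcl	%edx, 4(v)	# high word, with carry
 *		UNLOCK v		# popfl: restore interrupt state
 *		ret
 *	ENDP				# ENDPROC(atomic64_add_386), then
 *					# .purgem removes the one-shot endp
 */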
......
......@@ -11,7 +11,6 @@
#include <linux/linkage.h>
#include <asm/alternative-asm.h>
#include <asm/dwarf2.h>
.macro read64 reg
movl %ebx, %eax
......@@ -22,16 +21,11 @@
.endm
ENTRY(atomic64_read_cx8)
CFI_STARTPROC
read64 %ecx
ret
CFI_ENDPROC
ENDPROC(atomic64_read_cx8)
ENTRY(atomic64_set_cx8)
CFI_STARTPROC
1:
/* we don't need LOCK_PREFIX since aligned 64-bit writes
* are atomic on 586 and newer */
......@@ -39,28 +33,23 @@ ENTRY(atomic64_set_cx8)
jne 1b
ret
CFI_ENDPROC
ENDPROC(atomic64_set_cx8)
ENTRY(atomic64_xchg_cx8)
CFI_STARTPROC
1:
LOCK_PREFIX
cmpxchg8b (%esi)
jne 1b
ret
CFI_ENDPROC
ENDPROC(atomic64_xchg_cx8)
.macro addsub_return func ins insc
ENTRY(atomic64_\func\()_return_cx8)
CFI_STARTPROC
pushl_cfi_reg ebp
pushl_cfi_reg ebx
pushl_cfi_reg esi
pushl_cfi_reg edi
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl %eax, %esi
movl %edx, %edi
......@@ -79,12 +68,11 @@ ENTRY(atomic64_\func\()_return_cx8)
10:
movl %ebx, %eax
movl %ecx, %edx
popl_cfi_reg edi
popl_cfi_reg esi
popl_cfi_reg ebx
popl_cfi_reg ebp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
CFI_ENDPROC
ENDPROC(atomic64_\func\()_return_cx8)
.endm
......@@ -93,8 +81,7 @@ addsub_return sub sub sbb
.macro incdec_return func ins insc
ENTRY(atomic64_\func\()_return_cx8)
CFI_STARTPROC
pushl_cfi_reg ebx
pushl %ebx
read64 %esi
1:
......@@ -109,9 +96,8 @@ ENTRY(atomic64_\func\()_return_cx8)
10:
movl %ebx, %eax
movl %ecx, %edx
popl_cfi_reg ebx
popl %ebx
ret
CFI_ENDPROC
ENDPROC(atomic64_\func\()_return_cx8)
.endm
......@@ -119,8 +105,7 @@ incdec_return inc add adc
incdec_return dec sub sbb
ENTRY(atomic64_dec_if_positive_cx8)
CFI_STARTPROC
pushl_cfi_reg ebx
pushl %ebx
read64 %esi
1:
......@@ -136,18 +121,16 @@ ENTRY(atomic64_dec_if_positive_cx8)
2:
movl %ebx, %eax
movl %ecx, %edx
popl_cfi_reg ebx
popl %ebx
ret
CFI_ENDPROC
ENDPROC(atomic64_dec_if_positive_cx8)
ENTRY(atomic64_add_unless_cx8)
CFI_STARTPROC
pushl_cfi_reg ebp
pushl_cfi_reg ebx
pushl %ebp
pushl %ebx
/* these just push the two call parameters on the stack */
pushl_cfi_reg edi
pushl_cfi_reg ecx
pushl %edi
pushl %ecx
movl %eax, %ebp
movl %edx, %edi
......@@ -168,21 +151,18 @@ ENTRY(atomic64_add_unless_cx8)
movl $1, %eax
3:
addl $8, %esp
CFI_ADJUST_CFA_OFFSET -8
popl_cfi_reg ebx
popl_cfi_reg ebp
popl %ebx
popl %ebp
ret
4:
cmpl %edx, 4(%esp)
jne 2b
xorl %eax, %eax
jmp 3b
CFI_ENDPROC
ENDPROC(atomic64_add_unless_cx8)
ENTRY(atomic64_inc_not_zero_cx8)
CFI_STARTPROC
pushl_cfi_reg ebx
pushl %ebx
read64 %esi
1:
......@@ -199,7 +179,6 @@ ENTRY(atomic64_inc_not_zero_cx8)
movl $1, %eax
3:
popl_cfi_reg ebx
popl %ebx
ret
CFI_ENDPROC
ENDPROC(atomic64_inc_not_zero_cx8)
......@@ -26,7 +26,6 @@
*/
#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/errno.h>
#include <asm/asm.h>
......@@ -50,9 +49,8 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
* alignment for the unrolled loop.
*/
ENTRY(csum_partial)
CFI_STARTPROC
pushl_cfi_reg esi
pushl_cfi_reg ebx
pushl %esi
pushl %ebx
movl 20(%esp),%eax # Function arg: unsigned int sum
movl 16(%esp),%ecx # Function arg: int len
movl 12(%esp),%esi # Function arg: unsigned char *buff
......@@ -129,10 +127,9 @@ ENTRY(csum_partial)
jz 8f
roll $8, %eax
8:
popl_cfi_reg ebx
popl_cfi_reg esi
popl %ebx
popl %esi
ret
CFI_ENDPROC
ENDPROC(csum_partial)
#else
......@@ -140,9 +137,8 @@ ENDPROC(csum_partial)
/* Version for PentiumII/PPro */
ENTRY(csum_partial)
CFI_STARTPROC
pushl_cfi_reg esi
pushl_cfi_reg ebx
pushl %esi
pushl %ebx
movl 20(%esp),%eax # Function arg: unsigned int sum
movl 16(%esp),%ecx # Function arg: int len
movl 12(%esp),%esi # Function arg: const unsigned char *buf
......@@ -249,10 +245,9 @@ ENTRY(csum_partial)
jz 90f
roll $8, %eax
90:
popl_cfi_reg ebx
popl_cfi_reg esi
popl %ebx
popl %esi
ret
CFI_ENDPROC
ENDPROC(csum_partial)
#endif
......@@ -287,12 +282,10 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
#define FP 12
ENTRY(csum_partial_copy_generic)
CFI_STARTPROC
subl $4,%esp
CFI_ADJUST_CFA_OFFSET 4
pushl_cfi_reg edi
pushl_cfi_reg esi
pushl_cfi_reg ebx
pushl %edi
pushl %esi
pushl %ebx
movl ARGBASE+16(%esp),%eax # sum
movl ARGBASE+12(%esp),%ecx # len
movl ARGBASE+4(%esp),%esi # src
......@@ -401,12 +394,11 @@ DST( movb %cl, (%edi) )
.previous
popl_cfi_reg ebx
popl_cfi_reg esi
popl_cfi_reg edi
popl_cfi %ecx # equivalent to addl $4,%esp
popl %ebx
popl %esi
popl %edi
popl %ecx # equivalent to addl $4,%esp
ret
CFI_ENDPROC
ENDPROC(csum_partial_copy_generic)
#else
......@@ -426,10 +418,9 @@ ENDPROC(csum_partial_copy_generic)
#define ARGBASE 12
ENTRY(csum_partial_copy_generic)
CFI_STARTPROC
pushl_cfi_reg ebx
pushl_cfi_reg edi
pushl_cfi_reg esi
pushl %ebx
pushl %edi
pushl %esi
movl ARGBASE+4(%esp),%esi #src
movl ARGBASE+8(%esp),%edi #dst
movl ARGBASE+12(%esp),%ecx #len
......@@ -489,11 +480,10 @@ DST( movb %dl, (%edi) )
jmp 7b
.previous
popl_cfi_reg esi
popl_cfi_reg edi
popl_cfi_reg ebx
popl %esi
popl %edi
popl %ebx
ret
CFI_ENDPROC
ENDPROC(csum_partial_copy_generic)
#undef ROUND
......
#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>
......@@ -15,7 +14,6 @@
* %rdi - page
*/
ENTRY(clear_page)
CFI_STARTPROC
ALTERNATIVE_2 "jmp clear_page_orig", "", X86_FEATURE_REP_GOOD, \
"jmp clear_page_c_e", X86_FEATURE_ERMS
......@@ -24,11 +22,9 @@ ENTRY(clear_page)
xorl %eax,%eax
rep stosq
ret
CFI_ENDPROC
ENDPROC(clear_page)
ENTRY(clear_page_orig)
CFI_STARTPROC
xorl %eax,%eax
movl $4096/64,%ecx
......@@ -48,14 +44,11 @@ ENTRY(clear_page_orig)
jnz .Lloop
nop
ret
CFI_ENDPROC
ENDPROC(clear_page_orig)
ENTRY(clear_page_c_e)
CFI_STARTPROC
movl $4096,%ecx
xorl %eax,%eax
rep stosb
ret
CFI_ENDPROC
ENDPROC(clear_page_c_e)
......@@ -6,7 +6,6 @@
*
*/
#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/percpu.h>
.text
......@@ -21,7 +20,6 @@
* %al : Operation successful
*/
ENTRY(this_cpu_cmpxchg16b_emu)
CFI_STARTPROC
#
# Emulate 'cmpxchg16b %gs:(%rsi)' except we return the result in %al not
......@@ -32,7 +30,7 @@ CFI_STARTPROC
# *atomic* on a single cpu (as provided by the this_cpu_xx class of
# macros).
#
pushfq_cfi
pushfq
cli
cmpq PER_CPU_VAR((%rsi)), %rax
......@@ -43,17 +41,13 @@ CFI_STARTPROC
movq %rbx, PER_CPU_VAR((%rsi))
movq %rcx, PER_CPU_VAR(8(%rsi))
CFI_REMEMBER_STATE
popfq_cfi
popfq
mov $1, %al
ret
CFI_RESTORE_STATE
.Lnot_same:
popfq_cfi
popfq
xor %al,%al
ret
CFI_ENDPROC
ENDPROC(this_cpu_cmpxchg16b_emu)
......@@ -7,7 +7,6 @@
*/
#include <linux/linkage.h>
#include <asm/dwarf2.h>
.text
......@@ -20,14 +19,13 @@
* %ecx : high 32 bits of new value
*/
ENTRY(cmpxchg8b_emu)
CFI_STARTPROC
#
# Emulate 'cmpxchg8b (%esi)' on UP except we don't
# set the whole ZF thing (caller will just compare
# eax:edx with the expected value)
#
pushfl_cfi
pushfl
cli
cmpl (%esi), %eax
......@@ -38,18 +36,15 @@ CFI_STARTPROC
movl %ebx, (%esi)
movl %ecx, 4(%esi)
CFI_REMEMBER_STATE
popfl_cfi
popfl
ret
CFI_RESTORE_STATE
.Lnot_same:
movl (%esi), %eax
.Lhalf_same:
movl 4(%esi), %edx
popfl_cfi
popfl
ret
CFI_ENDPROC
ENDPROC(cmpxchg8b_emu)
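A hypothetical call-site sketch for the helper above, showing the register contract the comment describes -- pointer in %esi, expected value in %eax:%edx, replacement in %ebx:%ecx, and the caller doing its own comparison afterwards (all symbols here are made up):

	movl	$my_var, %esi		/* hypothetical 64-bit variable */
	movl	old_lo, %eax		/* expected value, low half */
	movl	old_hi, %edx		/* expected value, high half */
	movl	$1, %ebx		/* new value, low half */
	movl	$0, %ecx		/* new value, high half */
	call	cmpxchg8b_emu
	/* %eax:%edx now holds the value actually seen; compare it against
	 * a saved copy of the expected value to detect a race */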
/* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */
#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>
......@@ -13,22 +12,16 @@
*/
ALIGN
ENTRY(copy_page)
CFI_STARTPROC
ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD
movl $4096/8, %ecx
rep movsq
ret
CFI_ENDPROC
ENDPROC(copy_page)
ENTRY(copy_page_regs)
CFI_STARTPROC
subq $2*8, %rsp
CFI_ADJUST_CFA_OFFSET 2*8
movq %rbx, (%rsp)
CFI_REL_OFFSET rbx, 0
movq %r12, 1*8(%rsp)
CFI_REL_OFFSET r12, 1*8
movl $(4096/64)-5, %ecx
.p2align 4
......@@ -87,11 +80,7 @@ ENTRY(copy_page_regs)
jnz .Loop2
movq (%rsp), %rbx
CFI_RESTORE rbx
movq 1*8(%rsp), %r12
CFI_RESTORE r12
addq $2*8, %rsp
CFI_ADJUST_CFA_OFFSET -2*8
ret
CFI_ENDPROC
ENDPROC(copy_page_regs)
......@@ -7,7 +7,6 @@
*/
#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
......@@ -18,7 +17,6 @@
/* Standard copy_to_user with segment limit checking */
ENTRY(_copy_to_user)
CFI_STARTPROC
GET_THREAD_INFO(%rax)
movq %rdi,%rcx
addq %rdx,%rcx
......@@ -30,12 +28,10 @@ ENTRY(_copy_to_user)
X86_FEATURE_REP_GOOD, \
"jmp copy_user_enhanced_fast_string", \
X86_FEATURE_ERMS
CFI_ENDPROC
ENDPROC(_copy_to_user)
/* Standard copy_from_user with segment limit checking */
ENTRY(_copy_from_user)
CFI_STARTPROC
GET_THREAD_INFO(%rax)
movq %rsi,%rcx
addq %rdx,%rcx
......@@ -47,14 +43,12 @@ ENTRY(_copy_from_user)
X86_FEATURE_REP_GOOD, \
"jmp copy_user_enhanced_fast_string", \
X86_FEATURE_ERMS
CFI_ENDPROC
ENDPROC(_copy_from_user)
.section .fixup,"ax"
/* must zero dest */
ENTRY(bad_from_user)
bad_from_user:
CFI_STARTPROC
movl %edx,%ecx
xorl %eax,%eax
rep
......@@ -62,7 +56,6 @@ bad_from_user:
bad_to_user:
movl %edx,%eax
ret
CFI_ENDPROC
ENDPROC(bad_from_user)
.previous
......@@ -80,7 +73,6 @@ ENDPROC(bad_from_user)
* eax uncopied bytes or 0 if successful.
*/
ENTRY(copy_user_generic_unrolled)
CFI_STARTPROC
ASM_STAC
cmpl $8,%edx
jb 20f /* less than 8 bytes, go to byte copy loop */
......@@ -162,7 +154,6 @@ ENTRY(copy_user_generic_unrolled)
_ASM_EXTABLE(19b,40b)
_ASM_EXTABLE(21b,50b)
_ASM_EXTABLE(22b,50b)
CFI_ENDPROC
ENDPROC(copy_user_generic_unrolled)
/* Some CPUs run faster using the string copy instructions.
......@@ -184,7 +175,6 @@ ENDPROC(copy_user_generic_unrolled)
* eax uncopied bytes or 0 if successful.
*/
ENTRY(copy_user_generic_string)
CFI_STARTPROC
ASM_STAC
cmpl $8,%edx
jb 2f /* less than 8 bytes, go to byte copy loop */
......@@ -209,7 +199,6 @@ ENTRY(copy_user_generic_string)
_ASM_EXTABLE(1b,11b)
_ASM_EXTABLE(3b,12b)
CFI_ENDPROC
ENDPROC(copy_user_generic_string)
/*
......@@ -225,7 +214,6 @@ ENDPROC(copy_user_generic_string)
* eax uncopied bytes or 0 if successful.
*/
ENTRY(copy_user_enhanced_fast_string)
CFI_STARTPROC
ASM_STAC
movl %edx,%ecx
1: rep
......@@ -240,7 +228,6 @@ ENTRY(copy_user_enhanced_fast_string)
.previous
_ASM_EXTABLE(1b,12b)
CFI_ENDPROC
ENDPROC(copy_user_enhanced_fast_string)
/*
......@@ -248,7 +235,6 @@ ENDPROC(copy_user_enhanced_fast_string)
* This will force destination/source out of cache for more performance.
*/
ENTRY(__copy_user_nocache)
CFI_STARTPROC
ASM_STAC
cmpl $8,%edx
jb 20f /* less than 8 bytes, go to byte copy loop */
......@@ -332,5 +318,4 @@ ENTRY(__copy_user_nocache)
_ASM_EXTABLE(19b,40b)
_ASM_EXTABLE(21b,50b)
_ASM_EXTABLE(22b,50b)
CFI_ENDPROC
ENDPROC(__copy_user_nocache)
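All the copy_user variants above rely on the exception-table mechanism instead of pre-validating every pointer in the copy loop; a minimal, self-contained sketch of that pattern (hypothetical labels and recovery policy):

1:	movq	(%rsi), %rax		/* may fault on a bad user pointer */
	ret
	.section .fixup, "ax"
2:	xorl	%eax, %eax		/* recovery path: report failure */
	ret
	.previous
	_ASM_EXTABLE(1b, 2b)		/* if 1b faults, resume at 2b */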
......@@ -6,7 +6,6 @@
* for more details. No warranty for anything given at all.
*/
#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/errno.h>
#include <asm/asm.h>
......@@ -47,23 +46,16 @@
ENTRY(csum_partial_copy_generic)
CFI_STARTPROC
cmpl $3*64, %edx
jle .Lignore
.Lignore:
subq $7*8, %rsp
CFI_ADJUST_CFA_OFFSET 7*8
movq %rbx, 2*8(%rsp)
CFI_REL_OFFSET rbx, 2*8
movq %r12, 3*8(%rsp)
CFI_REL_OFFSET r12, 3*8
movq %r14, 4*8(%rsp)
CFI_REL_OFFSET r14, 4*8
movq %r13, 5*8(%rsp)
CFI_REL_OFFSET r13, 5*8
movq %rbp, 6*8(%rsp)
CFI_REL_OFFSET rbp, 6*8
movq %r8, (%rsp)
movq %r9, 1*8(%rsp)
......@@ -206,22 +198,14 @@ ENTRY(csum_partial_copy_generic)
addl %ebx, %eax
adcl %r9d, %eax /* carry */
CFI_REMEMBER_STATE
.Lende:
movq 2*8(%rsp), %rbx
CFI_RESTORE rbx
movq 3*8(%rsp), %r12
CFI_RESTORE r12
movq 4*8(%rsp), %r14
CFI_RESTORE r14
movq 5*8(%rsp), %r13
CFI_RESTORE r13
movq 6*8(%rsp), %rbp
CFI_RESTORE rbp
addq $7*8, %rsp
CFI_ADJUST_CFA_OFFSET -7*8
ret
CFI_RESTORE_STATE
/* Exception handlers. Very simple, zeroing is done in the wrappers */
.Lbad_source:
......@@ -237,5 +221,4 @@ ENTRY(csum_partial_copy_generic)
jz .Lende
movl $-EFAULT, (%rax)
jmp .Lende
CFI_ENDPROC
ENDPROC(csum_partial_copy_generic)
......@@ -26,7 +26,6 @@
*/
#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/page_types.h>
#include <asm/errno.h>
#include <asm/asm-offsets.h>
......@@ -36,7 +35,6 @@
.text
ENTRY(__get_user_1)
CFI_STARTPROC
GET_THREAD_INFO(%_ASM_DX)
cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
jae bad_get_user
......@@ -45,11 +43,9 @@ ENTRY(__get_user_1)
xor %eax,%eax
ASM_CLAC
ret
CFI_ENDPROC
ENDPROC(__get_user_1)
ENTRY(__get_user_2)
CFI_STARTPROC
add $1,%_ASM_AX
jc bad_get_user
GET_THREAD_INFO(%_ASM_DX)
......@@ -60,11 +56,9 @@ ENTRY(__get_user_2)
xor %eax,%eax
ASM_CLAC
ret
CFI_ENDPROC
ENDPROC(__get_user_2)
ENTRY(__get_user_4)
CFI_STARTPROC
add $3,%_ASM_AX
jc bad_get_user
GET_THREAD_INFO(%_ASM_DX)
......@@ -75,11 +69,9 @@ ENTRY(__get_user_4)
xor %eax,%eax
ASM_CLAC
ret
CFI_ENDPROC
ENDPROC(__get_user_4)
ENTRY(__get_user_8)
CFI_STARTPROC
#ifdef CONFIG_X86_64
add $7,%_ASM_AX
jc bad_get_user
......@@ -104,28 +96,23 @@ ENTRY(__get_user_8)
ASM_CLAC
ret
#endif
CFI_ENDPROC
ENDPROC(__get_user_8)
bad_get_user:
CFI_STARTPROC
xor %edx,%edx
mov $(-EFAULT),%_ASM_AX
ASM_CLAC
ret
CFI_ENDPROC
END(bad_get_user)
#ifdef CONFIG_X86_32
bad_get_user_8:
CFI_STARTPROC
xor %edx,%edx
xor %ecx,%ecx
mov $(-EFAULT),%_ASM_AX
ASM_CLAC
ret
CFI_ENDPROC
END(bad_get_user_8)
#endif
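The __get_user_N helpers use a non-C calling convention, which is also why bad_get_user can be a bare label with no frame of its own: the user address arrives in %eax/%rax, the error code comes back in the same register, and the zero-extended value is returned in %edx/%rdx. A hypothetical call-site sketch:

	mov	user_ptr, %_ASM_AX	/* hypothetical: user address */
	call	__get_user_4
	test	%eax, %eax		/* 0 on success, -EFAULT on fault */
	jnz	.Lfault			/* hypothetical label */
	/* value is now zero-extended in %edx (%rdx for __get_user_8) */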
......
......@@ -16,15 +16,12 @@
*/
#include <linux/linkage.h>
#include <asm/dwarf2.h>
/*
* override generic version in lib/iomap_copy.c
*/
ENTRY(__iowrite32_copy)
CFI_STARTPROC
movl %edx,%ecx
rep movsd
ret
CFI_ENDPROC
ENDPROC(__iowrite32_copy)
......@@ -2,7 +2,6 @@
#include <linux/linkage.h>
#include <asm/cpufeature.h>
#include <asm/dwarf2.h>
#include <asm/alternative-asm.h>
/*
......@@ -53,7 +52,6 @@ ENTRY(memcpy_erms)
ENDPROC(memcpy_erms)
ENTRY(memcpy_orig)
CFI_STARTPROC
movq %rdi, %rax
cmpq $0x20, %rdx
......@@ -178,5 +176,4 @@ ENTRY(memcpy_orig)
.Lend:
retq
CFI_ENDPROC
ENDPROC(memcpy_orig)
......@@ -6,7 +6,6 @@
* - Copyright 2011 Fenghua Yu <fenghua.yu@intel.com>
*/
#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>
......@@ -27,7 +26,6 @@
ENTRY(memmove)
ENTRY(__memmove)
CFI_STARTPROC
/* Handle more than 32 bytes in the loop */
mov %rdi, %rax
......@@ -207,6 +205,5 @@ ENTRY(__memmove)
movb %r11b, (%rdi)
13:
retq
CFI_ENDPROC
ENDPROC(__memmove)
ENDPROC(memmove)
/* Copyright 2002 Andi Kleen, SuSE Labs */
#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>
......@@ -66,7 +65,6 @@ ENTRY(memset_erms)
ENDPROC(memset_erms)
ENTRY(memset_orig)
CFI_STARTPROC
movq %rdi,%r10
/* expand byte value */
......@@ -78,7 +76,6 @@ ENTRY(memset_orig)
movl %edi,%r9d
andl $7,%r9d
jnz .Lbad_alignment
CFI_REMEMBER_STATE
.Lafter_bad_alignment:
movq %rdx,%rcx
......@@ -128,7 +125,6 @@ ENTRY(memset_orig)
movq %r10,%rax
ret
CFI_RESTORE_STATE
.Lbad_alignment:
cmpq $7,%rdx
jbe .Lhandle_7
......@@ -139,5 +135,4 @@ ENTRY(memset_orig)
subq %r8,%rdx
jmp .Lafter_bad_alignment
.Lfinal:
CFI_ENDPROC
ENDPROC(memset_orig)
#include <linux/linkage.h>
#include <linux/errno.h>
#include <asm/dwarf2.h>
#include <asm/asm.h>
#include <asm/msr.h>
......@@ -13,9 +12,8 @@
*/
.macro op_safe_regs op
ENTRY(\op\()_safe_regs)
CFI_STARTPROC
pushq_cfi_reg rbx
pushq_cfi_reg rbp
pushq %rbx
pushq %rbp
movq %rdi, %r10 /* Save pointer */
xorl %r11d, %r11d /* Return value */
movl (%rdi), %eax
......@@ -25,7 +23,6 @@ ENTRY(\op\()_safe_regs)
movl 20(%rdi), %ebp
movl 24(%rdi), %esi
movl 28(%rdi), %edi
CFI_REMEMBER_STATE
1: \op
2: movl %eax, (%r10)
movl %r11d, %eax /* Return value */
......@@ -35,16 +32,14 @@ ENTRY(\op\()_safe_regs)
movl %ebp, 20(%r10)
movl %esi, 24(%r10)
movl %edi, 28(%r10)
popq_cfi_reg rbp
popq_cfi_reg rbx
popq %rbp
popq %rbx
ret
3:
CFI_RESTORE_STATE
movl $-EIO, %r11d
jmp 2b
_ASM_EXTABLE(1b, 3b)
CFI_ENDPROC
ENDPROC(\op\()_safe_regs)
.endm
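For context, the macro is instantiated once per MSR instruction later in the file -- from memory, along the lines of:

op_safe_regs rdmsr
op_safe_regs wrmsr

which is where rdmsr_safe_regs() and wrmsr_safe_regs() come from.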
......@@ -52,13 +47,12 @@ ENDPROC(\op\()_safe_regs)
.macro op_safe_regs op
ENTRY(\op\()_safe_regs)
CFI_STARTPROC
pushl_cfi_reg ebx
pushl_cfi_reg ebp
pushl_cfi_reg esi
pushl_cfi_reg edi
pushl_cfi $0 /* Return value */
pushl_cfi %eax
pushl %ebx
pushl %ebp
pushl %esi
pushl %edi
pushl $0 /* Return value */
pushl %eax
movl 4(%eax), %ecx
movl 8(%eax), %edx
movl 12(%eax), %ebx
......@@ -66,32 +60,28 @@ ENTRY(\op\()_safe_regs)
movl 24(%eax), %esi
movl 28(%eax), %edi
movl (%eax), %eax
CFI_REMEMBER_STATE
1: \op
2: pushl_cfi %eax
2: pushl %eax
movl 4(%esp), %eax
popl_cfi (%eax)
popl (%eax)
addl $4, %esp
CFI_ADJUST_CFA_OFFSET -4
movl %ecx, 4(%eax)
movl %edx, 8(%eax)
movl %ebx, 12(%eax)
movl %ebp, 20(%eax)
movl %esi, 24(%eax)
movl %edi, 28(%eax)
popl_cfi %eax
popl_cfi_reg edi
popl_cfi_reg esi
popl_cfi_reg ebp
popl_cfi_reg ebx
popl %eax
popl %edi
popl %esi
popl %ebp
popl %ebx
ret
3:
CFI_RESTORE_STATE
movl $-EIO, 4(%esp)
jmp 2b
_ASM_EXTABLE(1b, 3b)
CFI_ENDPROC
ENDPROC(\op\()_safe_regs)
.endm
......
......@@ -11,7 +11,6 @@
* return value.
*/
#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/thread_info.h>
#include <asm/errno.h>
#include <asm/asm.h>
......@@ -30,11 +29,9 @@
* as they get called from within inline assembly.
*/
#define ENTER CFI_STARTPROC ; \
GET_THREAD_INFO(%_ASM_BX)
#define ENTER GET_THREAD_INFO(%_ASM_BX)
#define EXIT ASM_CLAC ; \
ret ; \
CFI_ENDPROC
ret
.text
ENTRY(__put_user_1)
......@@ -87,7 +84,6 @@ ENTRY(__put_user_8)
ENDPROC(__put_user_8)
bad_put_user:
CFI_STARTPROC
movl $-EFAULT,%eax
EXIT
END(bad_put_user)
......
......@@ -15,7 +15,6 @@
#include <linux/linkage.h>
#include <asm/alternative-asm.h>
#include <asm/dwarf2.h>
#define __ASM_HALF_REG(reg) __ASM_SEL(reg, e##reg)
#define __ASM_HALF_SIZE(inst) __ASM_SEL(inst##w, inst##l)
......@@ -34,10 +33,10 @@
*/
#define save_common_regs \
pushl_cfi_reg ecx
pushl %ecx
#define restore_common_regs \
popl_cfi_reg ecx
popl %ecx
/* Avoid uglifying the argument copying x86-64 needs to do. */
.macro movq src, dst
......@@ -64,50 +63,45 @@
*/
#define save_common_regs \
pushq_cfi_reg rdi; \
pushq_cfi_reg rsi; \
pushq_cfi_reg rcx; \
pushq_cfi_reg r8; \
pushq_cfi_reg r9; \
pushq_cfi_reg r10; \
pushq_cfi_reg r11
pushq %rdi; \
pushq %rsi; \
pushq %rcx; \
pushq %r8; \
pushq %r9; \
pushq %r10; \
pushq %r11
#define restore_common_regs \
popq_cfi_reg r11; \
popq_cfi_reg r10; \
popq_cfi_reg r9; \
popq_cfi_reg r8; \
popq_cfi_reg rcx; \
popq_cfi_reg rsi; \
popq_cfi_reg rdi
popq %r11; \
popq %r10; \
popq %r9; \
popq %r8; \
popq %rcx; \
popq %rsi; \
popq %rdi
#endif
/* Fix up special calling conventions */
ENTRY(call_rwsem_down_read_failed)
CFI_STARTPROC
save_common_regs
__ASM_SIZE(push,_cfi_reg) __ASM_REG(dx)
__ASM_SIZE(push,) %__ASM_REG(dx)
movq %rax,%rdi
call rwsem_down_read_failed
__ASM_SIZE(pop,_cfi_reg) __ASM_REG(dx)
__ASM_SIZE(pop,) %__ASM_REG(dx)
restore_common_regs
ret
CFI_ENDPROC
ENDPROC(call_rwsem_down_read_failed)
ENTRY(call_rwsem_down_write_failed)
CFI_STARTPROC
save_common_regs
movq %rax,%rdi
call rwsem_down_write_failed
restore_common_regs
ret
CFI_ENDPROC
ENDPROC(call_rwsem_down_write_failed)
ENTRY(call_rwsem_wake)
CFI_STARTPROC
/* do nothing if there are still outstanding active readers */
__ASM_HALF_SIZE(dec) %__ASM_HALF_REG(dx)
jnz 1f
......@@ -116,17 +110,14 @@ ENTRY(call_rwsem_wake)
call rwsem_wake
restore_common_regs
1: ret
CFI_ENDPROC
ENDPROC(call_rwsem_wake)
ENTRY(call_rwsem_downgrade_wake)
CFI_STARTPROC
save_common_regs
__ASM_SIZE(push,_cfi_reg) __ASM_REG(dx)
__ASM_SIZE(push,) %__ASM_REG(dx)
movq %rax,%rdi
call rwsem_downgrade_wake
__ASM_SIZE(pop,_cfi_reg) __ASM_REG(dx)
__ASM_SIZE(pop,) %__ASM_REG(dx)
restore_common_regs
ret
CFI_ENDPROC
ENDPROC(call_rwsem_downgrade_wake)
......@@ -6,16 +6,14 @@
*/
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/dwarf2.h>
/* put return address in eax (arg1) */
.macro THUNK name, func, put_ret_addr_in_eax=0
.globl \name
\name:
CFI_STARTPROC
pushl_cfi_reg eax
pushl_cfi_reg ecx
pushl_cfi_reg edx
pushl %eax
pushl %ecx
pushl %edx
.if \put_ret_addr_in_eax
/* Place EIP in the arg1 */
......@@ -23,11 +21,10 @@
.endif
call \func
popl_cfi_reg edx
popl_cfi_reg ecx
popl_cfi_reg eax
popl %edx
popl %ecx
popl %eax
ret
CFI_ENDPROC
_ASM_NOKPROBE(\name)
.endm
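For context, the macro is used further down in the file roughly as follows (from memory, so treat the exact names as approximate):

#ifdef CONFIG_TRACE_IRQFLAGS
	THUNK trace_hardirqs_on_thunk, trace_hardirqs_on_caller, 1
	THUNK trace_hardirqs_off_thunk, trace_hardirqs_off_caller, 1
#endif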
......
......@@ -6,7 +6,6 @@
* Subject to the GNU public license, v.2. No warranty of any kind.
*/
#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm.h>
......@@ -14,27 +13,25 @@
.macro THUNK name, func, put_ret_addr_in_rdi=0
.globl \name
\name:
CFI_STARTPROC
/* this one pushes 9 elems; the next one would be the return %rip */
pushq_cfi_reg rdi
pushq_cfi_reg rsi
pushq_cfi_reg rdx
pushq_cfi_reg rcx
pushq_cfi_reg rax
pushq_cfi_reg r8
pushq_cfi_reg r9
pushq_cfi_reg r10
pushq_cfi_reg r11
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %rax
pushq %r8
pushq %r9
pushq %r10
pushq %r11
.if \put_ret_addr_in_rdi
/* 9*8(%rsp) is return addr on stack */
movq_cfi_restore 9*8, rdi
movq 9*8(%rsp), %rdi
.endif
call \func
jmp restore
CFI_ENDPROC
_ASM_NOKPROBE(\name)
.endm
......@@ -57,19 +54,16 @@
#if defined(CONFIG_TRACE_IRQFLAGS) \
|| defined(CONFIG_DEBUG_LOCK_ALLOC) \
|| defined(CONFIG_PREEMPT)
CFI_STARTPROC
CFI_ADJUST_CFA_OFFSET 9*8
restore:
popq_cfi_reg r11
popq_cfi_reg r10
popq_cfi_reg r9
popq_cfi_reg r8
popq_cfi_reg rax
popq_cfi_reg rcx
popq_cfi_reg rdx
popq_cfi_reg rsi
popq_cfi_reg rdi
popq %r11
popq %r10
popq %r9
popq %r8
popq %rax
popq %rcx
popq %rdx
popq %rsi
popq %rdi
ret
CFI_ENDPROC
_ASM_NOKPROBE(restore)
#endif
......@@ -8,7 +8,6 @@
* of the License.
*/
#include <linux/linkage.h>
#include <asm/dwarf2.h>
/*
* Calling convention :
......