Commit 53255c9a authored by Heiko Carstens, committed by Martin Schwidefsky

s390/ftrace: remove 31 bit ftrace support

The 31 bit and 64 bit code paths diverge more and more, and it is rather
painful to keep both running. To make things simpler, just remove the
31 bit support, which nobody uses anyway.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

Parent: a62bc073
arch/s390/Kconfig
@@ -117,11 +117,11 @@ config S390
 	select HAVE_CMPXCHG_LOCAL
 	select HAVE_C_RECORDMCOUNT
 	select HAVE_DEBUG_KMEMLEAK
-	select HAVE_DYNAMIC_FTRACE
+	select HAVE_DYNAMIC_FTRACE if 64BIT
 	select HAVE_DYNAMIC_FTRACE_WITH_REGS if 64BIT
 	select HAVE_FTRACE_MCOUNT_RECORD
-	select HAVE_FUNCTION_GRAPH_TRACER
-	select HAVE_FUNCTION_TRACER
+	select HAVE_FUNCTION_GRAPH_TRACER if 64BIT
+	select HAVE_FUNCTION_TRACER if 64BIT
 	select HAVE_FUTEX_CMPXCHG if FUTEX
 	select HAVE_KERNEL_BZIP2
 	select HAVE_KERNEL_GZIP
arch/s390/include/asm/ftrace.h
@@ -18,14 +18,8 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
 
 #endif /* __ASSEMBLY__ */
 
-#ifdef CONFIG_64BIT
 #define MCOUNT_INSN_SIZE	18
-#else
-#define MCOUNT_INSN_SIZE	22
-#endif
 
-#ifdef CONFIG_64BIT
 #define ARCH_SUPPORTS_FTRACE_OPS 1
-#endif
 
 #endif /* _ASM_S390_FTRACE_H */
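
An editorial aside on where these constants come from; this is derived from
the comments in arch/s390/kernel/ftrace.c further down, not part of the patch:

/*
 * Annotation (not kernel source): MCOUNT_INSN_SIZE is 18 because the
 * compiler-generated 64-bit mcount block consists of three 6-byte
 * instructions (see the ftrace.c comment below):
 *	stg	%r14,8(%r15)	# 6 bytes, offset 0
 *	larl	%r1,<&counter>	# 6 bytes, offset 6
 *	brasl	%r14,_mcount	# 6 bytes, offset 12 -> 18 total
 * The removed 31-bit value (22) was the 30-byte block minus the 4-byte
 * first and last instructions, which were never patched.
 */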
arch/s390/kernel/Makefile
@@ -52,8 +52,7 @@ obj-$(CONFIG_COMPAT) += compat_wrapper.o $(compat-obj-y)
 obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
 obj-$(CONFIG_KPROBES)		+= kprobes.o
-obj-$(CONFIG_FUNCTION_TRACER)	+= $(if $(CONFIG_64BIT),mcount64.o,mcount.o)
-obj-$(CONFIG_FUNCTION_TRACER)	+= ftrace.o
+obj-$(CONFIG_FUNCTION_TRACER)	+= mcount.o ftrace.o
 obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
 obj-$(CONFIG_UPROBES)		+= uprobes.o
arch/s390/kernel/ftrace.c
@@ -21,9 +21,8 @@ void mcount_replace_code(void);
 void ftrace_disable_code(void);
 void ftrace_enable_insn(void);
 
-#ifdef CONFIG_64BIT
 /*
- * The 64-bit mcount code looks like this:
+ * The mcount code looks like this:
  *	stg	%r14,8(%r15)		# offset 0
  *	larl	%r1,<&counter>		# offset 6
  *	brasl	%r14,_mcount		# offset 12
@@ -34,7 +33,7 @@ void ftrace_enable_insn(void);
  * Note: we do not patch the first instruction to an unconditional branch,
  * since that would break kprobes/jprobes. It is easier to leave the larl
  * instruction in and only modify the second instruction.
- * The 64-bit enabled ftrace code block looks like this:
+ * The enabled ftrace code block looks like this:
  *	larl	%r0,.+24		# offset 0
  * >	lg	%r1,__LC_FTRACE_FUNC	# offset 6
  *	br	%r1			# offset 12
@@ -71,65 +70,15 @@ asm(
 
 #define MCOUNT_INSN_OFFSET	6
 #define FTRACE_INSN_SIZE	6
 
-#else /* CONFIG_64BIT */
-/*
- * The 31-bit mcount code looks like this:
- *	st	%r14,4(%r15)		# offset 0
- * >	bras	%r1,0f			# offset 4
- * >	.long	_mcount			# offset 8
- * >	.long	<&counter>		# offset 12
- * > 0:	l	%r14,0(%r1)		# offset 16
- * >	l	%r1,4(%r1)		# offset 20
- *	basr	%r14,%r14		# offset 24
- *	l	%r14,4(%r15)		# offset 26
- * Total length is 30 bytes. The twenty bytes starting from offset 4
- * to offset 24 get overwritten by ftrace_make_nop / ftrace_make_call.
- * The 31-bit enabled ftrace code block looks like this:
- *	st	%r14,4(%r15)		# offset 0
- * >	l	%r14,__LC_FTRACE_FUNC	# offset 4
- * >	j	0f			# offset 8
- * >	.fill	12,1,0x07		# offset 12
- * 0:	basr	%r14,%r14		# offset 24
- *	l	%r14,4(%r14)		# offset 26
- * The return points of the mcount/ftrace function have the same offset 26.
- * The 31-bit disabled ftrace code block looks like this:
- *	st	%r14,4(%r15)		# offset 0
- * >	j	.+26			# offset 4
- * >	j	0f			# offset 8
- * >	.fill	12,1,0x07		# offset 12
- * 0:	basr	%r14,%r14		# offset 24
- *	l	%r14,4(%r14)		# offset 26
- * The j instruction branches to offset 30 to skip as many instructions
- * as possible.
- */
-asm(
-	"	.align	4\n"
-	"ftrace_disable_code:\n"
-	"	j	1f\n"
-	"	j	0f\n"
-	"	.fill	12,1,0x07\n"
-	"0:	basr	%r14,%r14\n"
-	"1:\n"
-	"	.align	4\n"
-	"ftrace_enable_insn:\n"
-	"	l	%r14,"__stringify(__LC_FTRACE_FUNC)"\n");
-
-#define FTRACE_INSN_SIZE	4
-
-#endif /* CONFIG_64BIT */
-
-#ifdef CONFIG_64BIT
 int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
 		       unsigned long addr)
 {
 	return 0;
 }
-#endif
 
 int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
 		    unsigned long addr)
 {
-#ifdef CONFIG_64BIT
 	/* Initial replacement of the whole mcount block */
 	if (addr == MCOUNT_ADDR) {
 		if (probe_kernel_write((void *) rec->ip - MCOUNT_INSN_OFFSET,
@@ -138,7 +87,6 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
 			return -EPERM;
 		return 0;
 	}
-#endif
 	if (probe_kernel_write((void *) rec->ip, ftrace_disable_code,
 			       MCOUNT_INSN_SIZE))
 		return -EPERM;
@@ -196,8 +144,6 @@ unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
  * the original offset to prepare_ftrace_return and put it back.
  */
 
-#ifdef CONFIG_64BIT
 int ftrace_enable_ftrace_graph_caller(void)
 {
 	static unsigned short offset = 0x0002;
@@ -216,25 +162,4 @@ int ftrace_disable_ftrace_graph_caller(void)
 			       &offset, sizeof(offset));
 }
 
-#else /* CONFIG_64BIT */
-
-int ftrace_enable_ftrace_graph_caller(void)
-{
-	unsigned short offset;
-
-	offset = ((void *) prepare_ftrace_return -
-		  (void *) ftrace_graph_caller) / 2;
-	return probe_kernel_write((void *) ftrace_graph_caller + 2,
-				  &offset, sizeof(offset));
-}
-
-int ftrace_disable_ftrace_graph_caller(void)
-{
-	static unsigned short offset = 0x0002;
-
-	return probe_kernel_write((void *) ftrace_graph_caller + 2,
-				  &offset, sizeof(offset));
-}
-
-#endif /* CONFIG_64BIT */
-
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
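
For orientation, here is a sketch of the disable counterpart whose tail the
hunk above shows. It mirrors the surviving 64-bit helper rather than quoting
it; the sketch_ prefix marks it as an illustration, and it assumes the usual
extern declarations for ftrace_graph_caller, ftrace_graph_caller_end and
probe_kernel_write are in scope:

/* Sketch: the "j" instruction keeps its signed branch offset, counted
 * in halfwords, in bytes 2..3.  Writing 0x0002 (as in the enable path
 * above) turns it into "j .+4", i.e. a nop that falls through into the
 * graph tracing code; writing the halfword distance to
 * ftrace_graph_caller_end re-creates the jump that skips it again.
 */
static int sketch_disable_ftrace_graph_caller(void)
{
	unsigned short offset;

	offset = ((void *) ftrace_graph_caller_end -
		  (void *) ftrace_graph_caller) / 2;
	return probe_kernel_write((void *) ftrace_graph_caller + 2,
				  &offset, sizeof(offset));
}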
arch/s390/kernel/mcount.S
@@ -8,60 +8,73 @@
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 #include <asm/ftrace.h>
+#include <asm/ptrace.h>
 
 	.section .kprobes.text, "ax"
 
 ENTRY(ftrace_stub)
 	br	%r14
 
+#define STACK_FRAME_SIZE  (STACK_FRAME_OVERHEAD + __PT_SIZE)
+#define STACK_PTREGS	  (STACK_FRAME_OVERHEAD)
+#define STACK_PTREGS_GPRS (STACK_PTREGS + __PT_GPRS)
+#define STACK_PTREGS_PSW  (STACK_PTREGS + __PT_PSW)
+
 ENTRY(_mcount)
 	br	%r14
 
 ENTRY(ftrace_caller)
-	stm	%r2,%r5,16(%r15)
-	bras	%r1,1f
-0:	.long	ftrace_trace_function
-1:	st	%r14,56(%r15)
-	lr	%r0,%r15
-	ahi	%r15,-96
-	l	%r3,100(%r15)
-	la	%r2,0(%r14)
-	st	%r0,__SF_BACKCHAIN(%r15)
-	la	%r3,0(%r3)
-	ahi	%r2,-MCOUNT_INSN_SIZE
-	l	%r14,0b-0b(%r1)
-	l	%r14,0(%r14)
-	basr	%r14,%r14
+	.globl	ftrace_regs_caller
+	.set	ftrace_regs_caller,ftrace_caller
+	lgr	%r1,%r15
+	aghi	%r15,-STACK_FRAME_SIZE
+	stg	%r1,__SF_BACKCHAIN(%r15)
+	stg	%r1,(STACK_PTREGS_GPRS+15*8)(%r15)
+	stg	%r0,(STACK_PTREGS_PSW+8)(%r15)
+	stmg	%r2,%r14,(STACK_PTREGS_GPRS+2*8)(%r15)
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+	aghik	%r2,%r0,-MCOUNT_INSN_SIZE
+	lgrl	%r4,function_trace_op
+	lgrl	%r1,ftrace_trace_function
+#else
+	lgr	%r2,%r0
+	aghi	%r2,-MCOUNT_INSN_SIZE
+	larl	%r4,function_trace_op
+	lg	%r4,0(%r4)
+	larl	%r1,ftrace_trace_function
+	lg	%r1,0(%r1)
+#endif
+	lgr	%r3,%r14
+	la	%r5,STACK_PTREGS(%r15)
+	basr	%r14,%r1
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	l	%r2,100(%r15)
-	l	%r3,152(%r15)
-ENTRY(ftrace_graph_caller)
-# The bras instruction gets runtime patched to call prepare_ftrace_return.
+# The j instruction gets runtime patched to a nop instruction.
 # See ftrace_enable_ftrace_graph_caller. The patched instruction is:
-#	bras	%r14,prepare_ftrace_return
-	bras	%r14,0f
-0:	st	%r2,100(%r15)
+#	j	.+4
+ENTRY(ftrace_graph_caller)
+	j	ftrace_graph_caller_end
+	lg	%r2,(STACK_PTREGS_GPRS+14*8)(%r15)
+	lg	%r3,(STACK_PTREGS_PSW+8)(%r15)
+	brasl	%r14,prepare_ftrace_return
+	stg	%r2,(STACK_PTREGS_GPRS+14*8)(%r15)
+ftrace_graph_caller_end:
+	.globl	ftrace_graph_caller_end
 #endif
-	ahi	%r15,96
-	l	%r14,56(%r15)
-	lm	%r2,%r5,16(%r15)
-	br	%r14
+	lg	%r1,(STACK_PTREGS_PSW+8)(%r15)
+	lmg	%r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15)
+	br	%r1
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
 ENTRY(return_to_handler)
-	stm	%r2,%r5,16(%r15)
-	st	%r14,56(%r15)
-	lr	%r0,%r15
-	ahi	%r15,-96
-	st	%r0,__SF_BACKCHAIN(%r15)
-	bras	%r1,0f
-	.long	ftrace_return_to_handler
-0:	l	%r2,0b-0b(%r1)
-	basr	%r14,%r2
-	lr	%r14,%r2
-	ahi	%r15,96
-	lm	%r2,%r5,16(%r15)
+	stmg	%r2,%r5,32(%r15)
+	lgr	%r1,%r15
+	aghi	%r15,-STACK_FRAME_OVERHEAD
+	stg	%r1,__SF_BACKCHAIN(%r15)
+	brasl	%r14,ftrace_return_to_handler
+	aghi	%r15,STACK_FRAME_OVERHEAD
+	lgr	%r14,%r2
+	lmg	%r2,%r5,32(%r15)
 	br	%r14
 
 #endif
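
To connect the new ftrace_caller with its C-side consumer: the basr %r14,%r1
above is an indirect call into the current tracer. For orientation, this is
the tracer callback type of that kernel era, declared in
include/linux/ftrace.h (generic ftrace API, not part of this patch), together
with the register mapping the assembly establishes:

/* Register state set up by ftrace_caller right before basr %r14,%r1:
 *   %r2 = instrumented address (%r0 minus MCOUNT_INSN_SIZE)
 *   %r3 = parent ip (the traced function's caller, taken from %r14)
 *   %r4 = function_trace_op (a struct ftrace_ops *)
 *   %r5 = the pt_regs area built at STACK_PTREGS(%r15)
 */
typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *op, struct pt_regs *regs);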
arch/s390/kernel/mcount64.S
deleted file: its content (including the Copyright IBM Corp. 2008, 2009
header by Heiko Carstens) matches the new mcount.S shown above and is not
repeated here.
scripts/recordmcount.c
@@ -388,10 +388,6 @@ do_file(char const *const fname)
 			 "unrecognized ET_REL file: %s\n", fname);
 		fail_file();
 	}
-	if (w2(ehdr->e_machine) == EM_S390) {
-		reltype = R_390_32;
-		mcount_adjust_32 = -4;
-	}
 	if (w2(ehdr->e_machine) == EM_MIPS) {
 		reltype = R_MIPS_32;
 		is_fake_mcount32 = MIPS32_is_fake_mcount;
scripts/recordmcount.pl
@@ -241,13 +241,6 @@ if ($arch eq "x86_64") {
 	$objcopy .= " -O elf32-i386";
 	$cc .= " -m32";
 
-} elsif ($arch eq "s390" && $bits == 32) {
-	$mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_32\\s+_mcount\$";
-	$mcount_adjust = -4;
-	$alignment = 4;
-	$ld .= " -m elf_s390";
-	$cc .= " -m31";
-
 } elsif ($arch eq "s390" && $bits == 64) {
 	$mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_(PC|PLT)32DBL\\s+_mcount\\+0x2\$";
 	$mcount_adjust = -8;