Commit 60a011c7 authored by Ingo Molnar

Merge branch 'tracing/function-return-tracer' into tracing/fastboot

@@ -29,6 +29,7 @@ config X86
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_DYNAMIC_FTRACE
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_RET_TRACER if X86_32
select HAVE_FUNCTION_TRACE_MCOUNT_TEST
select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
select HAVE_ARCH_KGDB if !X86_VOYAGER
......
@@ -20,4 +20,30 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_RET_TRACER
#define FTRACE_RET_STACK_SIZE 20
#ifndef __ASSEMBLY__
/*
* Stack of return addresses for functions
* of a thread.
* Used in struct thread_info
*/
struct ftrace_ret_stack {
unsigned long ret;
unsigned long func;
unsigned long long calltime;
};
/*
* Primary handler of a function return.
* It relies on ftrace_return_to_handler.
* Defined in entry_32.S
*/
extern void return_to_handler(void);
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_FUNCTION_RET_TRACER */
#endif /* _ASM_X86_FTRACE_H */
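Taken together with the curr_ret_stack index added to struct thread_info below, ftrace_ret_stack forms a small fixed-size per-thread stack: the index starts at -1 (empty), each traced call pushes an entry, and each return pops one. The following is a minimal user-space sketch of that bookkeeping, for illustration only; all names besides FTRACE_RET_STACK_SIZE are invented, and the kernel version additionally takes ret_stack_lock with interrupts disabled.

#include <stdio.h>

#define FTRACE_RET_STACK_SIZE 20

struct ret_stack_entry {
	unsigned long ret;            /* hooked return address */
	unsigned long func;           /* traced function */
	unsigned long long calltime;  /* timestamp at entry */
};

static struct ret_stack_entry stack[FTRACE_RET_STACK_SIZE];
static int curr = -1;	/* -1 means empty, matching curr_ret_stack */

static int push(unsigned long ret, unsigned long func, unsigned long long t)
{
	if (curr == FTRACE_RET_STACK_SIZE - 1)
		return -1;	/* stack full: the kernel returns -EBUSY */
	curr++;
	stack[curr].ret = ret;
	stack[curr].func = func;
	stack[curr].calltime = t;
	return 0;
}

static unsigned long pop(unsigned long *func, unsigned long long *t)
{
	unsigned long ret = stack[curr].ret;
	*func = stack[curr].func;
	*t = stack[curr].calltime;
	curr--;
	return ret;
}

int main(void)
{
	unsigned long func;
	unsigned long long t;

	push(0x1000, 0x2000, 42);		/* function entry */
	printf("ret=%#lx\n", pop(&func, &t));	/* function exit: 0x1000 */
	return 0;
}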
@@ -20,6 +20,7 @@
struct task_struct;
struct exec_domain;
#include <asm/processor.h>
#include <asm/ftrace.h>
struct thread_info {
struct task_struct *task; /* main task structure */
@@ -38,8 +39,30 @@ struct thread_info {
*/
__u8 supervisor_stack[0];
#endif
#ifdef CONFIG_FUNCTION_RET_TRACER
/* Index of current stored address in ret_stack */
int curr_ret_stack;
/* Stack of return addresses for return function tracing */
struct ftrace_ret_stack ret_stack[FTRACE_RET_STACK_SIZE];
#endif
};
#ifdef CONFIG_FUNCTION_RET_TRACER
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
.exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
.preempt_count = 1, \
.addr_limit = KERNEL_DS, \
.restart_block = { \
.fn = do_no_restart_syscall, \
}, \
.curr_ret_stack = -1,\
}
#else
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
@@ -52,6 +75,7 @@ struct thread_info {
.fn = do_no_restart_syscall, \
}, \
}
#endif
#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
......
@@ -14,6 +14,11 @@ CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
CFLAGS_REMOVE_ftrace.o = -pg
endif
ifdef CONFIG_FUNCTION_RET_TRACER
# Don't trace __switch_to() when the return tracer is enabled
# (the plain function tracer alone still traces it)
CFLAGS_REMOVE_process_32.o = -pg
endif
#
# vsyscalls (which work on the user stack) should have
# no stack-protector checks:
@@ -65,6 +70,7 @@ obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
obj-$(CONFIG_X86_IO_APIC) += io_apic.o
obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
obj-$(CONFIG_FUNCTION_RET_TRACER) += ftrace.o
obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o
obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
......
@@ -1188,6 +1188,10 @@ ENTRY(mcount)
cmpl $ftrace_stub, ftrace_trace_function
jnz trace
#ifdef CONFIG_FUNCTION_RET_TRACER
cmpl $ftrace_stub, ftrace_function_return
jnz trace_return
#endif
.globl ftrace_stub
ftrace_stub:
ret
@@ -1206,8 +1210,37 @@ trace:
popl %edx
popl %ecx
popl %eax
jmp ftrace_stub
#ifdef CONFIG_FUNCTION_RET_TRACER
trace_return:
pushl %eax
pushl %ecx
pushl %edx
movl 0xc(%esp), %eax
pushl %eax
lea 0x4(%ebp), %eax
pushl %eax
call prepare_ftrace_return
addl $8, %esp
popl %edx
popl %ecx
popl %eax
jmp ftrace_stub
.globl return_to_handler
return_to_handler:
pushl $0
pushl %eax
pushl %ecx
pushl %edx
call ftrace_return_to_handler
movl %eax, 0xc(%esp)
popl %edx
popl %ecx
popl %eax
ret
#endif /* CONFIG_FUNCTION_RET_TRACER */
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */
......
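To summarize the control flow these trampolines set up: at function entry, mcount sees that ftrace_function_return is not ftrace_stub and jumps to trace_return, which hands prepare_ftrace_return() the address of the caller's return slot (lea 0x4(%ebp)) and the traced function's address (0xc(%esp)). prepare_ftrace_return() overwrites the return slot with return_to_handler, so when the traced function returns it lands in the trampoline, which calls ftrace_return_to_handler() to log the event and recover the original return address, then jumps there. Below is a rough C-level simulation of that flow; it is a hypothetical sketch that calls the handlers explicitly instead of patching the on-stack return address.

#include <stdio.h>
#include <time.h>

/* one-deep stand-in for the per-thread ftrace_ret_stack */
static struct {
	const char *func;
	unsigned long long calltime;
} frame;

static unsigned long long now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long long)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/* role of prepare_ftrace_return(): runs via mcount at function entry */
static void on_entry(const char *func)
{
	frame.func = func;
	frame.calltime = now_ns();
}

/* role of return_to_handler/ftrace_return_to_handler(): runs at exit */
static void on_return(void)
{
	printf("%s returned after %llu ns\n",
	       frame.func, now_ns() - frame.calltime);
}

static void traced_function(void)
{
	on_entry(__func__);	/* mcount -> trace_return -> prepare_ftrace_return */
	/* ... function body ... */
	on_return();		/* patched return address -> return_to_handler */
}

int main(void)
{
	traced_function();
	return 0;
}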
@@ -14,14 +14,178 @@
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list.h>
#include <asm/ftrace.h>
#include <linux/ftrace.h>
#include <asm/nops.h>
#include <asm/nmi.h>
static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
#ifdef CONFIG_FUNCTION_RET_TRACER
/*
* These functions are adapted from those used elsewhere
* in this file for dynamic ftrace. They have been
* simplified to ignore all traces taken in NMI context.
*/
static atomic_t in_nmi;
void ftrace_nmi_enter(void)
{
atomic_inc(&in_nmi);
}
void ftrace_nmi_exit(void)
{
atomic_dec(&in_nmi);
}
/*
* Synchronize accesses to the stack of return
* addresses with interrupts.
*/
static raw_spinlock_t ret_stack_lock;
/* Add a function return address to the trace stack on thread info. */
static int push_return_trace(unsigned long ret, unsigned long long time,
unsigned long func)
{
int index;
struct thread_info *ti;
unsigned long flags;
int err = 0;
raw_local_irq_save(flags);
__raw_spin_lock(&ret_stack_lock);
ti = current_thread_info();
/* The return trace stack is full */
if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1) {
err = -EBUSY;
goto out;
}
index = ++ti->curr_ret_stack;
ti->ret_stack[index].ret = ret;
ti->ret_stack[index].func = func;
ti->ret_stack[index].calltime = time;
out:
__raw_spin_unlock(&ret_stack_lock);
raw_local_irq_restore(flags);
return err;
}
/* Retrieve a function return address from the trace stack on thread info. */
static void pop_return_trace(unsigned long *ret, unsigned long long *time,
unsigned long *func)
{
struct thread_info *ti;
int index;
unsigned long flags;
raw_local_irq_save(flags);
__raw_spin_lock(&ret_stack_lock);
ti = current_thread_info();
index = ti->curr_ret_stack;
*ret = ti->ret_stack[index].ret;
*func = ti->ret_stack[index].func;
*time = ti->ret_stack[index].calltime;
ti->curr_ret_stack--;
__raw_spin_unlock(&ret_stack_lock);
raw_local_irq_restore(flags);
}
/*
* Send the trace to the ring-buffer.
* @return the original return address.
*/
unsigned long ftrace_return_to_handler(void)
{
struct ftrace_retfunc trace;
pop_return_trace(&trace.ret, &trace.calltime, &trace.func);
trace.rettime = cpu_clock(raw_smp_processor_id());
ftrace_function_return(&trace);
return trace.ret;
}
/*
* Hook the return address and push it onto the stack of
* return addresses in the current thread info.
*/
asmlinkage
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
unsigned long old;
unsigned long long calltime;
int faulted;
unsigned long return_hooker = (unsigned long)
&return_to_handler;
/* NMIs are currently unsupported */
if (atomic_read(&in_nmi))
return;
/*
* Protect against a fault, even if it shouldn't
* happen. This tool is too intrusive to
* forgo such a protection.
*/
asm volatile(
"1: movl (%[parent_old]), %[old]\n"
"2: movl %[return_hooker], (%[parent_replaced])\n"
" movl $0, %[faulted]\n"
".section .fixup, \"ax\"\n"
"3: movl $1, %[faulted]\n"
".previous\n"
".section __ex_table, \"a\"\n"
" .long 1b, 3b\n"
" .long 2b, 3b\n"
".previous\n"
: [parent_replaced] "=r" (parent), [old] "=r" (old),
[faulted] "=r" (faulted)
: [parent_old] "0" (parent), [return_hooker] "r" (return_hooker)
: "memory"
);
if (WARN_ON(faulted)) {
unregister_ftrace_return();
return;
}
if (WARN_ON(!__kernel_text_address(old))) {
unregister_ftrace_return();
*parent = old;
return;
}
calltime = cpu_clock(raw_smp_processor_id());
if (push_return_trace(old, calltime, self_addr) == -EBUSY)
*parent = old;
}
static int __init init_ftrace_function_return(void)
{
ret_stack_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
return 0;
}
device_initcall(init_ftrace_function_return);
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
union ftrace_code_union {
char code[MCOUNT_INSN_SIZE];
@@ -31,17 +195,11 @@ union ftrace_code_union {
} __attribute__((packed));
};
static int ftrace_calc_offset(long ip, long addr)
{
return (int)(addr - ip);
}
unsigned char *ftrace_nop_replace(void)
{
return ftrace_nop;
}
unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
static union ftrace_code_union calc;
@@ -183,6 +341,15 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
}
static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
unsigned char *ftrace_nop_replace(void)
{
return ftrace_nop;
}
int
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
unsigned char *new_code)
@@ -292,3 +459,4 @@ int __init ftrace_dyn_arch_init(void *data)
return 0;
}
#endif
@@ -267,6 +267,26 @@ ftrace_init_module(unsigned long *start, unsigned long *end) { }
#endif
/*
* Structure that defines a return function trace.
*/
struct ftrace_retfunc {
unsigned long ret; /* Return address */
unsigned long func; /* Current function */
unsigned long long calltime;
unsigned long long rettime;
};
#ifdef CONFIG_FUNCTION_RET_TRACER
/* Type of a callback handler for the return function tracer */
typedef void (*trace_function_return_t)(struct ftrace_retfunc *);
extern void register_ftrace_return(trace_function_return_t func);
/* The current handler in use */
extern trace_function_return_t ftrace_function_return;
extern void unregister_ftrace_return(void);
#endif
/*
* Structure which defines the trace of an initcall.
* You don't have to fill the func field since it is
......
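As a hedged illustration of how this interface is consumed (the handler name and pr_info message are invented; only struct ftrace_retfunc, register_ftrace_return() and unregister_ftrace_return() come from this patch), a built-in user of the API might look like:

#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/init.h>

/* hypothetical callback: invoked by ftrace_return_to_handler() on each return */
static void demo_return_handler(struct ftrace_retfunc *trace)
{
	/* trace->func ran from calltime to rettime and returns to trace->ret */
	pr_info("%pF took %llu ns\n",
		(void *)trace->func, trace->rettime - trace->calltime);
}

static int __init demo_init(void)
{
	register_ftrace_return(demo_return_handler);
	return 0;
}
device_initcall(demo_init);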
@@ -2,7 +2,7 @@
#define _LINUX_FTRACE_IRQ_H
#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_FUNCTION_RET_TRACER)
extern void ftrace_nmi_enter(void);
extern void ftrace_nmi_exit(void);
#else
......
@@ -2006,6 +2006,17 @@ static inline void setup_thread_stack(struct task_struct *p, struct task_struct
{
*task_thread_info(p) = *task_thread_info(org);
task_thread_info(p)->task = p;
#ifdef CONFIG_FUNCTION_RET_TRACER
/*
* When fork() creates a child process, this function is called.
* But the child task may not inherit the return addresses traced
* by the return function tracer, because it will directly execute
* in userspace and will not return to the kernel functions its
* parent used.
*/
task_thread_info(p)->curr_ret_stack = -1;
#endif
}
static inline unsigned long *end_of_stack(struct task_struct *p)
......
@@ -23,6 +23,10 @@ CFLAGS_REMOVE_cgroup-debug.o = -pg
CFLAGS_REMOVE_sched_clock.o = -pg
CFLAGS_REMOVE_sched.o = -mno-spe -pg
endif
ifdef CONFIG_FUNCTION_RET_TRACER
CFLAGS_REMOVE_extable.o = -pg # For __kernel_text_address()
CFLAGS_REMOVE_module.o = -pg # For __module_text_address()
endif
obj-$(CONFIG_FREEZER) += freezer.o
obj-$(CONFIG_PROFILING) += profile.o
......
@@ -9,6 +9,9 @@ config NOP_TRACER
config HAVE_FUNCTION_TRACER
bool
config HAVE_FUNCTION_RET_TRACER
bool
config HAVE_FUNCTION_TRACE_MCOUNT_TEST
bool
help
@@ -54,6 +57,17 @@ config FUNCTION_TRACER
(the bootup default), then the overhead of the instructions is very
small and not measurable even in micro-benchmarks.
config FUNCTION_RET_TRACER
bool "Kernel Function return Tracer"
depends on !DYNAMIC_FTRACE
depends on HAVE_FUNCTION_RET_TRACER
depends on FUNCTION_TRACER
help
Enable the kernel to trace a function at its return.
Its first purpose is to trace the duration of functions.
This is done by setting the current return address on the thread
info structure of the current task.
config IRQSOFF_TRACER
bool "Interrupts-off Latency Tracer"
default n
......
@@ -24,5 +24,6 @@ obj-$(CONFIG_NOP_TRACER) += trace_nop.o
obj-$(CONFIG_STACK_TRACER) += trace_stack.o
obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
obj-$(CONFIG_BOOT_TRACER) += trace_boot.o
obj-$(CONFIG_FUNCTION_RET_TRACER) += trace_functions_return.o
libftrace-y := ftrace.o
@@ -1480,3 +1480,19 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
return ret;
}
#ifdef CONFIG_FUNCTION_RET_TRACER
trace_function_return_t ftrace_function_return =
(trace_function_return_t)ftrace_stub;
void register_ftrace_return(trace_function_return_t func)
{
ftrace_function_return = func;
}
void unregister_ftrace_return(void)
{
ftrace_function_return = (trace_function_return_t)ftrace_stub;
}
#endif
@@ -244,13 +244,6 @@ unsigned long nsecs_to_usecs(unsigned long nsecs)
return nsecs / 1000;
}
/*
* TRACE_ITER_SYM_MASK masks the options in trace_flags that
* control the output of kernel symbols.
*/
#define TRACE_ITER_SYM_MASK \
(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
"print-parent",
@@ -810,6 +803,35 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
}
#ifdef CONFIG_FUNCTION_RET_TRACER
static void __trace_function_return(struct trace_array *tr,
struct trace_array_cpu *data,
struct ftrace_retfunc *trace,
unsigned long flags,
int pc)
{
struct ring_buffer_event *event;
struct ftrace_ret_entry *entry;
unsigned long irq_flags;
if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
return;
event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
&irq_flags);
if (!event)
return;
entry = ring_buffer_event_data(event);
tracing_generic_entry_update(&entry->ent, flags, pc);
entry->ent.type = TRACE_FN_RET;
entry->ip = trace->func;
entry->parent_ip = trace->ret;
entry->rettime = trace->rettime;
entry->calltime = trace->calltime;
ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
}
#endif
void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
unsigned long ip, unsigned long parent_ip, unsigned long flags,
@@ -1038,6 +1060,29 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
raw_local_irq_restore(flags);
}
#ifdef CONFIG_FUNCTION_RET_TRACER
void trace_function_return(struct ftrace_retfunc *trace)
{
struct trace_array *tr = &global_trace;
struct trace_array_cpu *data;
unsigned long flags;
long disabled;
int cpu;
int pc;
raw_local_irq_save(flags);
cpu = raw_smp_processor_id();
data = tr->data[cpu];
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {
pc = preempt_count();
__trace_function_return(tr, data, trace, flags, pc);
}
atomic_dec(&data->disabled);
raw_local_irq_restore(flags);
}
#endif /* CONFIG_FUNCTION_RET_TRACER */
static struct ftrace_ops trace_ops __read_mostly =
{
.func = function_trace_call,
@@ -1285,7 +1330,7 @@ seq_print_sym_offset(struct trace_seq *s, const char *fmt,
# define IP_FMT "%016lx"
#endif
int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
{
int ret;
@@ -1738,6 +1783,10 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
trace_seq_print_cont(s, iter);
break;
}
case TRACE_FN_RET: {
return print_return_function(iter);
break;
}
}
return TRACE_TYPE_HANDLED;
}
......
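Note the guard in trace_function_return() above: data->disabled is incremented before recording, and the event is written only when the counter transitions to 1, so an event generated re-entrantly on the same CPU (say, from an interrupt that itself hits the tracer) is silently dropped. A minimal user-space approximation of this pattern, with invented names:

#include <stdio.h>

static long disabled;	/* plays the per-CPU atomic_t data->disabled */

static void record_event(const char *what, int depth)
{
	if (++disabled == 1) {	/* atomic_inc_return(&data->disabled) == 1 */
		printf("recorded: %s\n", what);
		if (depth == 0)
			/* simulate an event fired while recording */
			record_event("nested event", depth + 1);
	}
	/* nested calls see disabled > 1 and record nothing */
	disabled--;		/* atomic_dec(&data->disabled) */
}

int main(void)
{
	record_event("outer event", 0);	/* prints only the outer event */
	return 0;
}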
@@ -22,6 +22,7 @@ enum trace_type {
TRACE_MMIO_RW,
TRACE_MMIO_MAP,
TRACE_BOOT,
TRACE_FN_RET,
__TRACE_LAST_TYPE
};
@@ -48,6 +49,15 @@ struct ftrace_entry {
unsigned long ip;
unsigned long parent_ip;
};
/* Function return entry */
struct ftrace_ret_entry {
struct trace_entry ent;
unsigned long ip;
unsigned long parent_ip;
unsigned long long calltime;
unsigned long long rettime;
};
extern struct tracer boot_tracer;
/*
@@ -218,6 +228,7 @@ extern void __ftrace_bad_type(void);
IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \
TRACE_MMIO_MAP); \
IF_ASSIGN(var, ent, struct trace_boot, TRACE_BOOT); \
IF_ASSIGN(var, ent, struct ftrace_ret_entry, TRACE_FN_RET); \
__ftrace_bad_type(); \
} while (0)
@@ -321,6 +332,8 @@ void trace_function(struct trace_array *tr,
unsigned long ip,
unsigned long parent_ip,
unsigned long flags, int pc);
void
trace_function_return(struct ftrace_retfunc *trace);
void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
@@ -393,6 +406,10 @@ extern void *head_page(struct trace_array_cpu *data);
extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
extern void trace_seq_print_cont(struct trace_seq *s,
struct trace_iterator *iter);
extern int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip,
unsigned long sym_flags);
extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
size_t cnt);
extern long ns2usecs(cycle_t nsec);
@@ -400,6 +417,17 @@ extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern unsigned long trace_flags;
/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_RET_TRACER
extern enum print_line_t print_return_function(struct trace_iterator *iter);
#else
static inline enum print_line_t
print_return_function(struct trace_iterator *iter)
{
return TRACE_TYPE_UNHANDLED;
}
#endif
/*
* trace_iterator_flags is an enumeration that defines bit
* positions into trace_flags that controls the output.
@@ -422,6 +450,13 @@ enum trace_iterator_flags {
TRACE_ITER_PREEMPTONLY = 0x800,
};
/*
* TRACE_ITER_SYM_MASK masks the options in trace_flags that
* control the output of kernel symbols.
*/
#define TRACE_ITER_SYM_MASK \
(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
extern struct tracer nop_trace;
/**
......
/*
*
* Function return tracer.
* Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
* Mostly borrowed from function tracer which
* is Copyright (c) Steven Rostedt <srostedt@redhat.com>
*
*/
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>
#include "trace.h"
static void start_return_trace(struct trace_array *tr)
{
register_ftrace_return(&trace_function_return);
}
static void stop_return_trace(struct trace_array *tr)
{
unregister_ftrace_return();
}
static void return_trace_init(struct trace_array *tr)
{
int cpu;
for_each_online_cpu(cpu)
tracing_reset(tr, cpu);
start_return_trace(tr);
}
static void return_trace_reset(struct trace_array *tr)
{
stop_return_trace(tr);
}
enum print_line_t
print_return_function(struct trace_iterator *iter)
{
struct trace_seq *s = &iter->seq;
struct trace_entry *entry = iter->ent;
struct ftrace_ret_entry *field;
int ret;
if (entry->type == TRACE_FN_RET) {
trace_assign_type(field, entry);
ret = trace_seq_printf(s, "%pF -> ", (void *)field->parent_ip);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
ret = seq_print_ip_sym(s, field->ip,
trace_flags & TRACE_ITER_SYM_MASK);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
ret = trace_seq_printf(s, " (%llu ns)\n",
field->rettime - field->calltime);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
else
return TRACE_TYPE_HANDLED;
}
return TRACE_TYPE_UNHANDLED;
}
static struct tracer return_trace __read_mostly =
{
.name = "return",
.init = return_trace_init,
.reset = return_trace_reset,
.print_line = print_return_function
};
static __init int init_return_trace(void)
{
return register_tracer(&return_trace);
}
device_initcall(init_return_trace);
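Given print_return_function() above, which prints the call site via %pF, then the traced function via seq_print_ip_sym(), then the duration, an output line should look roughly like the following; the symbols and timing here are invented for illustration:

sys_read+0x4e/0x70 -> vfs_read (2358 ns)

With CONFIG_FUNCTION_RET_TRACER enabled, register_tracer() makes the tracer selectable under the name "return", like any other ftrace tracer, typically by writing "return" to the current_tracer file in the tracing debugfs directory.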