Commit e946217e authored by Linus Torvalds


Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (31 commits)
  ftrace: fix current_tracer error return
  tracing: fix a build error on alpha
  ftrace: use a real variable for ftrace_nop in x86
  tracing/ftrace: make boot tracer select the sched_switch tracer
  tracepoint: check if the probe has been registered
  asm-generic: define DIE_OOPS in asm-generic
  trace: fix printk warning for u64
  ftrace: warning in kernel/trace/ftrace.c
  ftrace: fix build failure
  ftrace, powerpc, sparc64, x86: remove notrace from arch ftrace file
  ftrace: remove ftrace hash
  ftrace: remove mcount set
  ftrace: remove daemon
  ftrace: disable dynamic ftrace for all archs that use daemon
  ftrace: add ftrace warn on to disable ftrace
  ftrace: only have ftrace_kill atomic
  ftrace: use probe_kernel
  ftrace: comment arch ftrace code
  ftrace: return error on failed modified text.
  ftrace: dynamic ftrace process only text section
  ...
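
Most of the hunks below are the mechanical half of this series: the config symbol FTRACE becomes FUNCTION_TRACER (and HAVE_FTRACE becomes HAVE_FUNCTION_TRACER), freeing the name "ftrace" for the tracing framework as a whole rather than the function tracer alone. A minimal sketch of the guard pattern after the rename, as it recurs in the arch files below:

#ifdef CONFIG_FUNCTION_TRACER
/* mcount is emitted at every function entry when compiling with -pg;
 * modules need the symbol, so each arch exports it under the new guard. */
EXPORT_SYMBOL(mcount);
#endif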
......@@ -536,7 +536,7 @@ KBUILD_CFLAGS += -g
KBUILD_AFLAGS += -gdwarf-2
endif
ifdef CONFIG_FTRACE
ifdef CONFIG_FUNCTION_TRACER
KBUILD_CFLAGS += -pg
endif
......
......@@ -16,8 +16,7 @@ config ARM
select HAVE_ARCH_KGDB
select HAVE_KPROBES if (!XIP_KERNEL)
select HAVE_KRETPROBES if (HAVE_KPROBES)
select HAVE_FTRACE if (!XIP_KERNEL)
select HAVE_DYNAMIC_FTRACE if (HAVE_FTRACE)
select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
select HAVE_GENERIC_DMA_COHERENT
help
The ARM series is a line of low-power-consumption RISC chip designs
......
......@@ -70,7 +70,7 @@ SEDFLAGS = s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/
targets := vmlinux vmlinux.lds piggy.gz piggy.o font.o font.c \
head.o misc.o $(OBJS)
ifeq ($(CONFIG_FTRACE),y)
ifeq ($(CONFIG_FUNCTION_TRACER),y)
ORIG_CFLAGS := $(KBUILD_CFLAGS)
KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
endif
......
#ifndef _ASM_ARM_FTRACE
#define _ASM_ARM_FTRACE
#ifdef CONFIG_FTRACE
#ifdef CONFIG_FUNCTION_TRACER
#define MCOUNT_ADDR ((long)(mcount))
#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
......
......@@ -183,6 +183,6 @@ EXPORT_SYMBOL(_find_next_bit_be);
EXPORT_SYMBOL(copy_page);
#ifdef CONFIG_FTRACE
#ifdef CONFIG_FUNCTION_TRACER
EXPORT_SYMBOL(mcount);
#endif
......@@ -101,7 +101,7 @@ ENDPROC(ret_from_fork)
#undef CALL
#define CALL(x) .long x
#ifdef CONFIG_FTRACE
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(mcount)
stmdb sp!, {r0-r3, lr}
......@@ -149,7 +149,7 @@ trace:
ftrace_stub:
mov pc, lr
#endif /* CONFIG_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */
/*=============================================================================
* SWI handler
......
......@@ -95,19 +95,6 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
return ret;
}
int ftrace_mcount_set(unsigned long *data)
{
unsigned long pc, old;
unsigned long *addr = data;
unsigned char *new;
pc = (unsigned long)&mcount_call;
memcpy(&old, &mcount_call, MCOUNT_INSN_SIZE);
new = ftrace_call_replace(pc, *addr);
*addr = ftrace_modify_code(pc, (unsigned char *)&old, new);
return 0;
}
/* run from kstop_machine */
int __init ftrace_dyn_arch_init(void *data)
{
......
......@@ -108,8 +108,7 @@ config ARCH_NO_VIRT_TO_BUS
config PPC
bool
default y
select HAVE_DYNAMIC_FTRACE
select HAVE_FTRACE
select HAVE_FUNCTION_TRACER
select ARCH_WANT_OPTIONAL_GPIOLIB
select HAVE_IDE
select HAVE_IOREMAP_PROT
......
......@@ -122,7 +122,7 @@ KBUILD_CFLAGS += -mcpu=powerpc
endif
# Work around a gcc code-gen bug with -fno-omit-frame-pointer.
ifeq ($(CONFIG_FTRACE),y)
ifeq ($(CONFIG_FUNCTION_TRACER),y)
KBUILD_CFLAGS += -mno-sched-epilog
endif
......
#ifndef _ASM_POWERPC_FTRACE
#define _ASM_POWERPC_FTRACE
#ifdef CONFIG_FTRACE
#ifdef CONFIG_FUNCTION_TRACER
#define MCOUNT_ADDR ((long)(_mcount))
#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
......
......@@ -12,7 +12,7 @@ CFLAGS_prom_init.o += -fPIC
CFLAGS_btext.o += -fPIC
endif
ifdef CONFIG_FTRACE
ifdef CONFIG_FUNCTION_TRACER
# Do not trace early boot code
CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog
CFLAGS_REMOVE_prom_init.o = -pg -mno-sched-epilog
......
......@@ -1158,7 +1158,7 @@ machine_check_in_rtas:
#endif /* CONFIG_PPC_RTAS */
#ifdef CONFIG_FTRACE
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
......
......@@ -884,7 +884,7 @@ _GLOBAL(enter_prom)
mtlr r0
blr
#ifdef CONFIG_FTRACE
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
......
......@@ -28,17 +28,17 @@ static unsigned int ftrace_nop = 0x60000000;
#endif
static unsigned int notrace ftrace_calc_offset(long ip, long addr)
static unsigned int ftrace_calc_offset(long ip, long addr)
{
return (int)(addr - ip);
}
notrace unsigned char *ftrace_nop_replace(void)
unsigned char *ftrace_nop_replace(void)
{
return (char *)&ftrace_nop;
}
notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
static unsigned int op;
......@@ -68,7 +68,7 @@ notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
# define _ASM_PTR " .long "
#endif
notrace int
int
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
unsigned char *new_code)
{
......@@ -113,7 +113,7 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
return faulted;
}
notrace int ftrace_update_ftrace_func(ftrace_func_t func)
int ftrace_update_ftrace_func(ftrace_func_t func)
{
unsigned long ip = (unsigned long)(&ftrace_call);
unsigned char old[MCOUNT_INSN_SIZE], *new;
......@@ -126,23 +126,6 @@ notrace int ftrace_update_ftrace_func(ftrace_func_t func)
return ret;
}
notrace int ftrace_mcount_set(unsigned long *data)
{
unsigned long ip = (long)(&mcount_call);
unsigned long *addr = data;
unsigned char old[MCOUNT_INSN_SIZE], *new;
/*
* Replace the mcount stub with a pointer to the
* ip recorder function.
*/
memcpy(old, &mcount_call, MCOUNT_INSN_SIZE);
new = ftrace_call_replace(ip, *addr);
*addr = ftrace_modify_code(ip, old, new);
return 0;
}
int __init ftrace_dyn_arch_init(void *data)
{
/* This is running in kstop_machine */
......
......@@ -68,7 +68,7 @@ EXPORT_SYMBOL(single_step_exception);
EXPORT_SYMBOL(sys_sigreturn);
#endif
#ifdef CONFIG_FTRACE
#ifdef CONFIG_FUNCTION_TRACER
EXPORT_SYMBOL(_mcount);
#endif
......
CFLAGS_bootx_init.o += -fPIC
ifdef CONFIG_FTRACE
ifdef CONFIG_FUNCTION_TRACER
# Do not trace early boot code
CFLAGS_REMOVE_bootx_init.o = -pg -mno-sched-epilog
endif
......
......@@ -11,8 +11,7 @@ config SPARC
config SPARC64
bool
default y
select HAVE_DYNAMIC_FTRACE
select HAVE_FTRACE
select HAVE_FUNCTION_TRACER
select HAVE_IDE
select HAVE_LMB
select HAVE_ARCH_KGDB
......
......@@ -33,7 +33,7 @@ config DEBUG_PAGEALLOC
config MCOUNT
bool
depends on STACK_DEBUG || FTRACE
depends on STACK_DEBUG || FUNCTION_TRACER
default y
config FRAME_POINTER
......
......@@ -5,6 +5,8 @@
EXTRA_AFLAGS := -ansi
EXTRA_CFLAGS := -Werror
CFLAGS_REMOVE_ftrace.o = -pg
extra-y := head.o init_task.o vmlinux.lds
obj-y := process.o setup.o cpu.o idprom.o reboot.o \
......
......@@ -9,12 +9,12 @@
static const u32 ftrace_nop = 0x01000000;
notrace unsigned char *ftrace_nop_replace(void)
unsigned char *ftrace_nop_replace(void)
{
return (char *)&ftrace_nop;
}
notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
static u32 call;
s32 off;
......@@ -25,7 +25,7 @@ notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
return (unsigned char *) &call;
}
notrace int
int
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
unsigned char *new_code)
{
......@@ -59,7 +59,7 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
return faulted;
}
notrace int ftrace_update_ftrace_func(ftrace_func_t func)
int ftrace_update_ftrace_func(ftrace_func_t func)
{
unsigned long ip = (unsigned long)(&ftrace_call);
unsigned char old[MCOUNT_INSN_SIZE], *new;
......@@ -69,24 +69,6 @@ notrace int ftrace_update_ftrace_func(ftrace_func_t func)
return ftrace_modify_code(ip, old, new);
}
notrace int ftrace_mcount_set(unsigned long *data)
{
unsigned long ip = (long)(&mcount_call);
unsigned long *addr = data;
unsigned char old[MCOUNT_INSN_SIZE], *new;
/*
* Replace the mcount stub with a pointer to the
* ip recorder function.
*/
memcpy(old, &mcount_call, MCOUNT_INSN_SIZE);
new = ftrace_call_replace(ip, *addr);
*addr = ftrace_modify_code(ip, old, new);
return 0;
}
int __init ftrace_dyn_arch_init(void *data)
{
ftrace_mcount_set(data);
......
......@@ -93,7 +93,7 @@ mcount:
nop
1:
#endif
#ifdef CONFIG_FTRACE
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
mov %o7, %o0
.globl mcount_call
......@@ -119,7 +119,7 @@ mcount_call:
.size _mcount,.-_mcount
.size mcount,.-mcount
#ifdef CONFIG_FTRACE
#ifdef CONFIG_FUNCTION_TRACER
.globl ftrace_stub
.type ftrace_stub,#function
ftrace_stub:
......
......@@ -28,7 +28,7 @@ config X86
select HAVE_KRETPROBES
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_DYNAMIC_FTRACE
select HAVE_FTRACE
select HAVE_FUNCTION_TRACER
select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
select HAVE_ARCH_KGDB if !X86_VOYAGER
select HAVE_ARCH_TRACEHOOK
......
#ifndef _ASM_X86_FTRACE_H
#define _ASM_X86_FTRACE_H
#ifdef CONFIG_FTRACE
#ifdef CONFIG_FUNCTION_TRACER
#define MCOUNT_ADDR ((long)(mcount))
#define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */
......@@ -19,6 +19,6 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
}
#endif
#endif /* CONFIG_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* _ASM_X86_FTRACE_H */
......@@ -6,11 +6,12 @@ extra-y := head_$(BITS).o head$(BITS).o head.o init_task.o vmlinu
CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE)
ifdef CONFIG_FTRACE
ifdef CONFIG_FUNCTION_TRACER
# Do not profile debug and lowlevel utilities
CFLAGS_REMOVE_tsc.o = -pg
CFLAGS_REMOVE_rtc.o = -pg
CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
CFLAGS_REMOVE_ftrace.o = -pg
endif
#
......
......@@ -1149,7 +1149,7 @@ ENDPROC(xen_failsafe_callback)
#endif /* CONFIG_XEN */
#ifdef CONFIG_FTRACE
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(mcount)
......@@ -1204,7 +1204,7 @@ trace:
jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */
.section .rodata,"a"
#include "syscall_table_32.S"
......
......@@ -61,7 +61,7 @@
.code64
#ifdef CONFIG_FTRACE
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(mcount)
retq
......@@ -138,7 +138,7 @@ trace:
jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */
#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
......
......@@ -21,8 +21,7 @@
#include <asm/nops.h>
/* Long is fine, even if it is only 4 bytes ;-) */
static unsigned long *ftrace_nop;
static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
union ftrace_code_union {
char code[MCOUNT_INSN_SIZE];
......@@ -33,17 +32,17 @@ union ftrace_code_union {
};
static int notrace ftrace_calc_offset(long ip, long addr)
static int ftrace_calc_offset(long ip, long addr)
{
return (int)(addr - ip);
}
notrace unsigned char *ftrace_nop_replace(void)
unsigned char *ftrace_nop_replace(void)
{
return (char *)ftrace_nop;
return ftrace_nop;
}
notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
static union ftrace_code_union calc;
......@@ -57,7 +56,7 @@ notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
return calc.code;
}
notrace int
int
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
unsigned char *new_code)
{
......@@ -66,26 +65,31 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
/*
* Note: Due to modules and __init, code can
* disappear and change, we need to protect against faulting
* as well as code changing.
* as well as code changing. We do this by using the
* probe_kernel_* functions.
*
* No real locking needed, this code is run through
* kstop_machine, or before SMP starts.
*/
if (__copy_from_user_inatomic(replaced, (char __user *)ip, MCOUNT_INSN_SIZE))
return 1;
/* read the text we want to modify */
if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
return -EFAULT;
/* Make sure it is what we expect it to be */
if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
return 2;
return -EINVAL;
WARN_ON_ONCE(__copy_to_user_inatomic((char __user *)ip, new_code,
MCOUNT_INSN_SIZE));
/* replace the text with the new text */
if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
return -EPERM;
sync_core();
return 0;
}
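
The rewrite above gives each failure mode its own errno, matching the ftrace_modify_code() contract documented later in this merge (0 on success, -EFAULT on a failed read, -EINVAL on a failed compare, -EPERM on a failed write). A hedged sketch of a caller decoding those codes; patch_one_site() is illustrative and not part of this commit:

/* Sketch only: patch one mcount call site and report what went wrong. */
static int patch_one_site(unsigned long ip, unsigned char *old_insn,
			  unsigned char *new_insn)
{
	int ret = ftrace_modify_code(ip, old_insn, new_insn);

	switch (ret) {
	case 0:				/* text patched */
		break;
	case -EFAULT:			/* code vanished (module or __init) */
		printk(KERN_WARNING "ftrace: unable to read %lx\n", ip);
		break;
	case -EINVAL:			/* bytes did not match old_insn */
		printk(KERN_WARNING "ftrace: unexpected code at %lx\n", ip);
		break;
	case -EPERM:			/* write to the text failed */
		printk(KERN_WARNING "ftrace: unable to write %lx\n", ip);
		break;
	}
	return ret;
}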
notrace int ftrace_update_ftrace_func(ftrace_func_t func)
int ftrace_update_ftrace_func(ftrace_func_t func)
{
unsigned long ip = (unsigned long)(&ftrace_call);
unsigned char old[MCOUNT_INSN_SIZE], *new;
......@@ -98,13 +102,6 @@ notrace int ftrace_update_ftrace_func(ftrace_func_t func)
return ret;
}
notrace int ftrace_mcount_set(unsigned long *data)
{
/* mcount is initialized as a nop */
*data = 0;
return 0;
}
int __init ftrace_dyn_arch_init(void *data)
{
extern const unsigned char ftrace_test_p6nop[];
......@@ -127,9 +124,6 @@ int __init ftrace_dyn_arch_init(void *data)
* TODO: check the cpuid to determine the best nop.
*/
asm volatile (
"jmp ftrace_test_jmp\n"
/* This code needs to stay around */
".section .text, \"ax\"\n"
"ftrace_test_jmp:"
"jmp ftrace_test_p6nop\n"
"nop\n"
......@@ -140,8 +134,6 @@ int __init ftrace_dyn_arch_init(void *data)
"jmp 1f\n"
"ftrace_test_nop5:"
".byte 0x66,0x66,0x66,0x66,0x90\n"
"jmp 1f\n"
".previous\n"
"1:"
".section .fixup, \"ax\"\n"
"2: movl $1, %0\n"
......@@ -156,15 +148,15 @@ int __init ftrace_dyn_arch_init(void *data)
switch (faulted) {
case 0:
pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
ftrace_nop = (unsigned long *)ftrace_test_p6nop;
memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
break;
case 1:
pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
ftrace_nop = (unsigned long *)ftrace_test_nop5;
memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
break;
case 2:
pr_info("ftrace: converting mcount calls to jmp . + 5\n");
ftrace_nop = (unsigned long *)ftrace_test_jmp;
memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
break;
}
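
These memcpy()s, together with the removal of the .section .text/.previous wrapper from the inline asm above, are the "use a real variable for ftrace_nop in x86" item: the candidate nop templates now live inside this __init function, whose text is freed after boot, so the selected nop is copied into the static ftrace_nop buffer instead of being pointed at in place.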
......
......@@ -5,7 +5,7 @@
#include <asm/desc.h>
#include <asm/ftrace.h>
#ifdef CONFIG_FTRACE
#ifdef CONFIG_FUNCTION_TRACER
/* mcount is defined in assembly */
EXPORT_SYMBOL(mcount);
#endif
......
......@@ -12,7 +12,7 @@
#include <asm/desc.h>
#include <asm/ftrace.h>
#ifdef CONFIG_FTRACE
#ifdef CONFIG_FUNCTION_TRACER
/* mcount is defined in assembly */
EXPORT_SYMBOL(mcount);
#endif
......
ifdef CONFIG_FTRACE
ifdef CONFIG_FUNCTION_TRACER
# Do not profile debug and lowlevel utilities
CFLAGS_REMOVE_spinlock.o = -pg
CFLAGS_REMOVE_time.o = -pg
......
......@@ -3,6 +3,7 @@
enum die_val {
DIE_UNUSED,
DIE_OOPS=1
};
#endif /* _ASM_GENERIC_KDEBUG_H */
......@@ -8,7 +8,7 @@
#include <linux/types.h>
#include <linux/kallsyms.h>
#ifdef CONFIG_FTRACE
#ifdef CONFIG_FUNCTION_TRACER
extern int ftrace_enabled;
extern int
......@@ -36,16 +36,14 @@ void clear_ftrace_function(void);
extern void ftrace_stub(unsigned long a0, unsigned long a1);
#else /* !CONFIG_FTRACE */
#else /* !CONFIG_FUNCTION_TRACER */
# define register_ftrace_function(ops) do { } while (0)
# define unregister_ftrace_function(ops) do { } while (0)
# define clear_ftrace_function(ops) do { } while (0)
static inline void ftrace_kill_atomic(void) { }
#endif /* CONFIG_FTRACE */
static inline void ftrace_kill(void) { }
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_DYNAMIC_FTRACE
# define FTRACE_HASHBITS 10
# define FTRACE_HASHSIZE (1<<FTRACE_HASHBITS)
enum {
FTRACE_FL_FREE = (1 << 0),
......@@ -58,9 +56,9 @@ enum {
};
struct dyn_ftrace {
struct hlist_node node;
unsigned long ip; /* address of mcount call-site */
unsigned long flags;
struct list_head list;
unsigned long ip; /* address of mcount call-site */
unsigned long flags;
};
int ftrace_force_update(void);
......@@ -71,14 +69,33 @@ extern int ftrace_ip_converted(unsigned long ip);
extern unsigned char *ftrace_nop_replace(void);
extern unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr);
extern int ftrace_dyn_arch_init(void *data);
extern int ftrace_mcount_set(unsigned long *data);
extern int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
unsigned char *new_code);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_call(void);
extern void mcount_call(void);
/**
* ftrace_modify_code - modify code segment
* @ip: the address of the code segment
* @old_code: the contents of what is expected to be there
* @new_code: the code to patch in
*
* This is a very sensitive operation and great care needs
* to be taken by the arch. The operation should carefully
* read the location, check to see if what is read is indeed
* what we expect it to be, and then on success of the compare,
* it should write to the location.
*
* Return must be:
* 0 on success
* -EFAULT on error reading the location
* -EINVAL on a failed compare of the contents
* -EPERM on error writing to the location
* Any other value will be considered a failure.
*/
extern int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
unsigned char *new_code);
extern int skip_trace(unsigned long ip);
extern void ftrace_release(void *start, unsigned long size);
......@@ -97,11 +114,10 @@ static inline void ftrace_release(void *start, unsigned long size) { }
/* totally disable ftrace - can not re-enable after this */
void ftrace_kill(void);
void ftrace_kill_atomic(void);
static inline void tracer_disable(void)
{
#ifdef CONFIG_FTRACE
#ifdef CONFIG_FUNCTION_TRACER
ftrace_enabled = 0;
#endif
}
......@@ -113,7 +129,7 @@ static inline void tracer_disable(void)
*/
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FTRACE
#ifdef CONFIG_FUNCTION_TRACER
int saved_ftrace_enabled = ftrace_enabled;
ftrace_enabled = 0;
return saved_ftrace_enabled;
......@@ -124,7 +140,7 @@ static inline int __ftrace_enabled_save(void)
static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FTRACE
#ifdef CONFIG_FUNCTION_TRACER
ftrace_enabled = enabled;
#endif
}
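
__ftrace_enabled_save() and __ftrace_enabled_restore() bracket code that must not be traced, now keyed off CONFIG_FUNCTION_TRACER. A minimal usage sketch; the surrounding function is illustrative:

static void do_untraced_work(void)
{
	/* Returns the previous ftrace_enabled value and clears it. */
	int saved = __ftrace_enabled_save();

	/* ... work that must not recurse into the function tracer ... */

	__ftrace_enabled_restore(saved);	/* put the old setting back */
}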
......
......@@ -13,7 +13,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o \
CFLAGS_REMOVE_sched.o = -mno-spe
ifdef CONFIG_FTRACE
ifdef CONFIG_FUNCTION_TRACER
# Do not trace debug files and internal ftrace files
CFLAGS_REMOVE_lockdep.o = -pg
CFLAGS_REMOVE_lockdep_proc.o = -pg
......@@ -88,7 +88,7 @@ obj-$(CONFIG_MARKERS) += marker.o
obj-$(CONFIG_TRACEPOINTS) += tracepoint.o
obj-$(CONFIG_LATENCYTOP) += latencytop.o
obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
obj-$(CONFIG_FTRACE) += trace/
obj-$(CONFIG_FUNCTION_TRACER) += trace/
obj-$(CONFIG_TRACING) += trace/
obj-$(CONFIG_SMP) += sched_cpupri.o
......
......@@ -474,7 +474,7 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = &proc_dointvec,
},
#ifdef CONFIG_FTRACE
#ifdef CONFIG_FUNCTION_TRACER
{
.ctl_name = CTL_UNNUMBERED,
.procname = "ftrace_enabled",
......
#
# Architectures that offer an FTRACE implementation should select HAVE_FTRACE:
# Architectures that offer a FUNCTION_TRACER implementation should
# select HAVE_FUNCTION_TRACER:
#
config NOP_TRACER
bool
config HAVE_FTRACE
config HAVE_FUNCTION_TRACER
bool
select NOP_TRACER
......@@ -28,9 +29,11 @@ config TRACING
select STACKTRACE
select TRACEPOINTS
config FTRACE
menu "Tracers"
config FUNCTION_TRACER
bool "Kernel Function Tracer"
depends on HAVE_FTRACE
depends on HAVE_FUNCTION_TRACER
depends on DEBUG_KERNEL
select FRAME_POINTER
select TRACING
......@@ -49,7 +52,6 @@ config IRQSOFF_TRACER
default n
depends on TRACE_IRQFLAGS_SUPPORT
depends on GENERIC_TIME
depends on HAVE_FTRACE
depends on DEBUG_KERNEL
select TRACE_IRQFLAGS
select TRACING
......@@ -73,7 +75,6 @@ config PREEMPT_TRACER
default n
depends on GENERIC_TIME
depends on PREEMPT
depends on HAVE_FTRACE
depends on DEBUG_KERNEL
select TRACING
select TRACER_MAX_TRACE
......@@ -101,7 +102,6 @@ config SYSPROF_TRACER
config SCHED_TRACER
bool "Scheduling Latency Tracer"
depends on HAVE_FTRACE
depends on DEBUG_KERNEL
select TRACING
select CONTEXT_SWITCH_TRACER
......@@ -112,7 +112,6 @@ config SCHED_TRACER
config CONTEXT_SWITCH_TRACER
bool "Trace process context switches"
depends on HAVE_FTRACE
depends on DEBUG_KERNEL
select TRACING
select MARKERS
......@@ -122,9 +121,9 @@ config CONTEXT_SWITCH_TRACER
config BOOT_TRACER
bool "Trace boot initcalls"
depends on HAVE_FTRACE
depends on DEBUG_KERNEL
select TRACING
select CONTEXT_SWITCH_TRACER
help
This tracer helps developers to optimize boot times: it records
the timings of the initcalls and traces key events and the identity
......@@ -141,9 +140,9 @@ config BOOT_TRACER
config STACK_TRACER
bool "Trace max stack"
depends on HAVE_FTRACE
depends on HAVE_FUNCTION_TRACER
depends on DEBUG_KERNEL
select FTRACE
select FUNCTION_TRACER
select STACKTRACE
help
This special tracer records the maximum stack footprint of the
......@@ -160,7 +159,7 @@ config STACK_TRACER
config DYNAMIC_FTRACE
bool "enable/disable ftrace tracepoints dynamically"
depends on FTRACE
depends on FUNCTION_TRACER
depends on HAVE_DYNAMIC_FTRACE
depends on DEBUG_KERNEL
default y
......@@ -170,7 +169,7 @@ config DYNAMIC_FTRACE
with a No-Op instruction) as they are called. A table is
created to dynamically enable them again.
This way a CONFIG_FTRACE kernel is slightly larger, but otherwise
This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but otherwise
has native performance as long as no tracing is active.
The changes to the code are done by a kernel thread that
......@@ -195,3 +194,5 @@ config FTRACE_STARTUP_TEST
a series of tests are made to verify that the tracer is
functioning properly. It will do tests on all the configured
tracers of ftrace.
endmenu
# Do not instrument the tracer itself:
ifdef CONFIG_FTRACE
ifdef CONFIG_FUNCTION_TRACER
ORIG_CFLAGS := $(KBUILD_CFLAGS)
KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS))
......@@ -10,13 +10,13 @@ CFLAGS_trace_selftest_dynamic.o = -pg
obj-y += trace_selftest_dynamic.o
endif
obj-$(CONFIG_FTRACE) += libftrace.o
obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o
obj-$(CONFIG_RING_BUFFER) += ring_buffer.o
obj-$(CONFIG_TRACING) += trace.o
obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
obj-$(CONFIG_SYSPROF_TRACER) += trace_sysprof.o
obj-$(CONFIG_FTRACE) += trace_functions.o
obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
......
This diff is collapsed.
......@@ -130,7 +130,7 @@ struct buffer_page {
static inline void free_buffer_page(struct buffer_page *bpage)
{
if (bpage->page)
__free_page(bpage->page);
free_page((unsigned long)bpage->page);
kfree(bpage);
}
......@@ -966,7 +966,9 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
if (unlikely(*delta > (1ULL << 59) && !once++)) {
printk(KERN_WARNING "Delta way too big! %llu"
" ts=%llu write stamp = %llu\n",
*delta, *ts, cpu_buffer->write_stamp);
(unsigned long long)*delta,
(unsigned long long)*ts,
(unsigned long long)cpu_buffer->write_stamp);
WARN_ON(1);
}
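
The explicit casts above are the "fix printk warning for u64" item from the merge list: u64 is unsigned long on some 64-bit architectures and unsigned long long elsewhere, so a %llu format only matches everywhere after an explicit cast. The same pattern in isolation:

static void print_stamp(u64 ts)
{
	/* u64 may be unsigned long or unsigned long long depending on
	 * the arch, so cast before printing with %llu. */
	printk(KERN_INFO "ts=%llu\n", (unsigned long long)ts);
}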
......
......@@ -34,6 +34,7 @@
#include <linux/stacktrace.h>
#include <linux/ring_buffer.h>
#include <linux/irqflags.h>
#include "trace.h"
......@@ -851,7 +852,7 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
preempt_enable_notrace();
}
#ifdef CONFIG_FTRACE
#ifdef CONFIG_FUNCTION_TRACER
static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
......@@ -865,9 +866,6 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
if (unlikely(!ftrace_function_enabled))
return;
if (skip_trace(ip))
return;
pc = preempt_count();
resched = need_resched();
preempt_disable_notrace();
......@@ -2379,9 +2377,10 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
int i;
size_t ret;
ret = cnt;
if (cnt > max_tracer_type_len)
cnt = max_tracer_type_len;
ret = cnt;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
......@@ -2414,8 +2413,8 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
out:
mutex_unlock(&trace_types_lock);
if (ret == cnt)
filp->f_pos += cnt;
if (ret > 0)
filp->f_pos += ret;
return ret;
}
......@@ -3097,7 +3096,7 @@ void ftrace_dump(void)
dump_ran = 1;
/* No turning back! */
ftrace_kill_atomic();
ftrace_kill();
for_each_tracing_cpu(cpu) {
atomic_inc(&global_trace.data[cpu]->disabled);
......
......@@ -335,7 +335,7 @@ void update_max_tr_single(struct trace_array *tr,
extern cycle_t ftrace_now(int cpu);
#ifdef CONFIG_FTRACE
#ifdef CONFIG_FUNCTION_TRACER
void tracing_start_function_trace(void);
void tracing_stop_function_trace(void);
#else
......
......@@ -64,7 +64,7 @@ static void function_trace_ctrl_update(struct trace_array *tr)
static struct tracer function_trace __read_mostly =
{
.name = "ftrace",
.name = "function",
.init = function_trace_init,
.reset = function_trace_reset,
.ctrl_update = function_trace_ctrl_update,
......
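After this rename the tracer is selected by its new name: writing function (rather than the old ftrace) to the current_tracer file in the tracing debugfs directory enables it, keeping the user-visible name in line with CONFIG_FUNCTION_TRACER.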
......@@ -63,7 +63,7 @@ irq_trace(void)
*/
static __cacheline_aligned_in_smp unsigned long max_sequence;
#ifdef CONFIG_FTRACE
#ifdef CONFIG_FUNCTION_TRACER
/*
* irqsoff uses its own tracer function to keep the overhead down:
*/
......@@ -104,7 +104,7 @@ static struct ftrace_ops trace_ops __read_mostly =
{
.func = irqsoff_tracer_call,
};
#endif /* CONFIG_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */
/*
* Should this new latency be reported/recorded?
......
......@@ -31,7 +31,7 @@ static raw_spinlock_t wakeup_lock =
static void __wakeup_reset(struct trace_array *tr);
#ifdef CONFIG_FTRACE
#ifdef CONFIG_FUNCTION_TRACER
/*
* irqsoff uses its own tracer function to keep the overhead down:
*/
......@@ -96,7 +96,7 @@ static struct ftrace_ops trace_ops __read_mostly =
{
.func = wakeup_tracer_call,
};
#endif /* CONFIG_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */
/*
* Should this new latency be reported/recorded?
......
......@@ -70,7 +70,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
return ret;
}
#ifdef CONFIG_FTRACE
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
......@@ -99,13 +99,6 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
/* passed in by parameter to fool gcc from optimizing */
func();
/* update the records */
ret = ftrace_force_update();
if (ret) {
printk(KERN_CONT ".. ftraced failed .. ");
return ret;
}
/*
* Some archs *cough*PowerPC*cough* add characters to the
* start of the function names. We simply put a '*' to
......@@ -183,13 +176,6 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
/* make sure msleep has been recorded */
msleep(1);
/* force the recorded functions to be traced */
ret = ftrace_force_update();
if (ret) {
printk(KERN_CONT ".. ftraced failed .. ");
return ret;
}
/* start the tracing */
ftrace_enabled = 1;
tracer_enabled = 1;
......@@ -226,7 +212,7 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
return ret;
}
#endif /* CONFIG_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_IRQSOFF_TRACER
int
......
......@@ -44,6 +44,10 @@ static inline void check_stack(void)
if (this_size <= max_stack_size)
return;
/* we do not handle interrupt stacks yet */
if (!object_is_on_stack(&this_size))
return;
raw_local_irq_save(flags);
__raw_spin_lock(&max_stack_lock);
......
......@@ -131,6 +131,9 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry, void *probe)
old = entry->funcs;
if (!old)
return NULL;
debug_print_probes(entry);
/* (N -> M), (N > 1, M >= 0) probes */
for (nr_probes = 0; old[nr_probes]; nr_probes++) {
......@@ -388,6 +391,11 @@ int tracepoint_probe_unregister(const char *name, void *probe)
if (entry->rcu_pending)
rcu_barrier_sched();
old = tracepoint_entry_remove_probe(entry, probe);
if (!old) {
printk(KERN_WARNING "Warning: Trying to unregister a probe"
"that doesn't exist\n");
goto end;
}
mutex_unlock(&tracepoints_mutex);
tracepoint_update_probes(); /* may update entry */
mutex_lock(&tracepoints_mutex);
......
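The added check turns unregistering a never-registered probe from a silent error path into an explicit warning. A hedged sketch of the call being guarded; the probe and tracepoint names here are illustrative:

/* Sketch only: my_probe was never passed to tracepoint_probe_register(). */
static void my_probe(struct task_struct *prev, struct task_struct *next)
{
}

static void example(void)
{
	/* Now logs "Trying to unregister a probe that doesn't exist"
	 * instead of continuing the teardown with a NULL result. */
	tracepoint_probe_unregister("sched_switch", (void *)my_probe);
}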
......@@ -2,7 +2,7 @@
# Makefile for some libs needed in the kernel.
#
ifdef CONFIG_FTRACE
ifdef CONFIG_FUNCTION_TRACER
ORIG_CFLAGS := $(KBUILD_CFLAGS)
KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS))
endif
......
......@@ -198,10 +198,16 @@ cmd_modversions = \
fi;
endif
ifdef CONFIG_64BIT
arch_bits = 64
else
arch_bits = 32
endif
ifdef CONFIG_FTRACE_MCOUNT_RECORD
cmd_record_mcount = perl $(srctree)/scripts/recordmcount.pl \
"$(ARCH)" "$(OBJDUMP)" "$(OBJCOPY)" "$(CC)" "$(LD)" "$(NM)" "$(RM)" \
"$(MV)" "$(@)";
"$(ARCH)" "$(arch_bits)" "$(OBJDUMP)" "$(OBJCOPY)" "$(CC)" "$(LD)" \
"$(NM)" "$(RM)" "$(MV)" "$(@)";
endif
define rule_cc_o_c
......
......@@ -37,7 +37,10 @@
# dmesg | perl scripts/bootgraph.pl > output.svg
#
my %start, %end;
use strict;
my %start;
my %end;
my $done = 0;
my $maxtime = 0;
my $firsttime = 100;
......@@ -105,18 +108,20 @@ my $threshold = ($maxtime - $firsttime) / 60.0;
my $stylecounter = 0;
my %rows;
my $rowscount = 1;
while (($key,$value) = each %start) {
my @initcalls = sort { $start{$a} <=> $start{$b} } keys(%start);
my $key;
foreach $key (@initcalls) {
my $duration = $end{$key} - $start{$key};
if ($duration >= $threshold) {
my $s, $s2, $e, $y;
$pid = $pids{$key};
my ($s, $s2, $e, $w, $y, $y2, $style);
my $pid = $pids{$key};
if (!defined($rows{$pid})) {
$rows{$pid} = $rowscount;
$rowscount = $rowscount + 1;
}
$s = ($value - $firsttime) * $mult;
$s = ($start{$key} - $firsttime) * $mult;
$s2 = $s + 6;
$e = ($end{$key} - $firsttime) * $mult;
$w = $e - $s;
......@@ -140,9 +145,9 @@ while (($key,$value) = each %start) {
my $time = $firsttime;
my $step = ($maxtime - $firsttime) / 15;
while ($time < $maxtime) {
my $s2 = ($time - $firsttime) * $mult;
my $s3 = ($time - $firsttime) * $mult;
my $tm = int($time * 100) / 100.0;
print "<text transform=\"translate($s2,89) rotate(90)\">$tm</text>\n";
print "<text transform=\"translate($s3,89) rotate(90)\">$tm</text>\n";
$time = $time + $step;
}
......
......@@ -106,7 +106,13 @@ if ($#ARGV < 6) {
exit(1);
}
my ($arch, $objdump, $objcopy, $cc, $ld, $nm, $rm, $mv, $inputfile) = @ARGV;
my ($arch, $bits, $objdump, $objcopy, $cc,
$ld, $nm, $rm, $mv, $inputfile) = @ARGV;
# Acceptable sections to record.
my %text_sections = (
".text" => 1,
);
$objdump = "objdump" if ((length $objdump) == 0);
$objcopy = "objcopy" if ((length $objcopy) == 0);
......@@ -129,8 +135,16 @@ my $function_regex; # Find the name of a function
# (return offset and func name)
my $mcount_regex; # Find the call site to mcount (return offset)
if ($arch eq "x86") {
if ($bits == 64) {
$arch = "x86_64";
} else {
$arch = "i386";
}
}
if ($arch eq "x86_64") {
$section_regex = "Disassembly of section";
$section_regex = "Disassembly of section\\s+(\\S+):";
$function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:";
$mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount([+-]0x[0-9a-zA-Z]+)?\$";
$type = ".quad";
......@@ -142,7 +156,7 @@ if ($arch eq "x86_64") {
$cc .= " -m64";
} elsif ($arch eq "i386") {
$section_regex = "Disassembly of section";
$section_regex = "Disassembly of section\\s+(\\S+):";
$function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:";
$mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount\$";
$type = ".long";
......@@ -289,7 +303,13 @@ my $text;
while (<IN>) {
# is it a section?
if (/$section_regex/) {
$read_function = 1;
# Only record text sections that we know are safe
if (defined($text_sections{$1})) {
$read_function = 1;
} else {
$read_function = 0;
}
# print out any recorded offsets
update_funcs() if ($text_found);
......