Commit 52f232cb authored by Steven Rostedt, committed by Ingo Molnar

tracing: likely/unlikely branch annotation tracer

Impact: new likely/unlikely branch tracer

This patch adds a way to record the instances of the likely() and unlikely()
branch condition annotations.

When "unlikely" is set in /debugfs/tracing/iter_ctrl the unlikely conditions
will be added to any of the ftrace tracers. The change takes effect when
a new tracer is passed into the current_tracer file.

For example:

 bash-3471  [003]   357.014755: [INCORRECT] sched_info_dequeued:sched_stats.h:177
 bash-3471  [003]   357.014756: [correct] update_curr:sched_fair.c:489
 bash-3471  [003]   357.014758: [correct] calc_delta_fair:sched_fair.c:411
 bash-3471  [003]   357.014759: [correct] account_group_exec_runtime:sched_stats.h:356
 bash-3471  [003]   357.014761: [correct] update_curr:sched_fair.c:489
 bash-3471  [003]   357.014763: [INCORRECT] calc_delta_fair:sched_fair.c:411
 bash-3471  [003]   357.014765: [correct] calc_delta_mine:sched.c:1279

Which shows the normal tracer heading, as well as whether the condition was
correct "[correct]" or was mistaken "[INCORRECT]", followed by the function,
file name and line number.
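The two steps above can be scripted from userspace. Below is a minimal sketch, not part of this patch: it assumes debugfs is mounted at /debugfs as in this message (many systems mount it elsewhere), and uses sched_switch purely as an example tracer name.

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* Write a short string to a tracing control file. */
    static void write_str(const char *path, const char *s)
    {
            int fd = open(path, O_WRONLY);

            if (fd < 0) {
                    perror(path);
                    return;
            }
            if (write(fd, s, strlen(s)) < 0)
                    perror(path);   /* best-effort in this sketch */
            close(fd);
    }

    int main(void)
    {
            /* Turn on the "unlikely" iterator option... */
            write_str("/debugfs/tracing/iter_ctrl", "unlikely");
            /* ...then select a tracer: the option only takes effect
             * when a tracer is written into current_tracer. */
            write_str("/debugfs/tracing/current_tracer", "sched_switch");
            return 0;
    }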
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent 1f0d69a9
kernel/trace/Kconfig:
@@ -175,6 +175,28 @@ config TRACE_UNLIKELY_PROFILE
          Say N if unsure.

config TRACING_UNLIKELY
        bool
        help
          Selected by tracers that will trace the likely and unlikely
          conditions. This prevents the tracers themselves from being
          profiled. Profiling the tracing infrastructure can only happen
          when the likelys and unlikelys are not being traced.

config UNLIKELY_TRACER
        bool "Trace likely/unlikely instances"
        depends on TRACE_UNLIKELY_PROFILE
        select TRACING_UNLIKELY
        help
          This traces the events of likely and unlikely condition
          calls in the kernel. The difference between this and the
          "Trace likely/unlikely profiler" is that this is not a
          histogram of the callers, but actually places the calling
          events into a running trace buffer to see when and where the
          events happened, as well as their results.

          Say N if unsure.

config STACK_TRACER
        bool "Trace max stack"
        depends on HAVE_FUNCTION_TRACER
......
kernel/trace/Makefile:
@@ -10,6 +10,12 @@ CFLAGS_trace_selftest_dynamic.o = -pg
obj-y += trace_selftest_dynamic.o
endif

# If unlikely tracing is enabled, do not trace these files
ifdef CONFIG_TRACING_UNLIKELY
KBUILD_CFLAGS += '-Dlikely(x)=likely_notrace(x)'
KBUILD_CFLAGS += '-Dunlikely(x)=unlikely_notrace(x)'
endif

obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o
obj-$(CONFIG_RING_BUFFER) += ring_buffer.o
......
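The _notrace variants that these -D flags swap in are the plain branch-prediction hints with no profiling hook attached; remapping likely()/unlikely() this way for the tracer's own objects is what keeps the tracer from recursing into itself. A sketch of what they expand to (they come from the parent profiling patch's changes to include/linux/compiler.h, shown here for reference):

    /* Plain compiler hints, no tracing hook attached. */
    #define likely_notrace(x)       __builtin_expect(!!(x), 1)
    #define unlikely_notrace(x)     __builtin_expect(!!(x), 0)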
kernel/trace/trace.c:
@@ -258,6 +258,9 @@ static const char *trace_options[] = {
        "sched-tree",
        "ftrace_printk",
        "ftrace_preempt",
#ifdef CONFIG_UNLIKELY_TRACER
        "unlikely",
#endif
        NULL
};
@@ -1648,6 +1651,18 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
                trace_seq_print_cont(s, iter);
                break;
        }
        case TRACE_UNLIKELY: {
                struct trace_unlikely *field;

                trace_assign_type(field, entry);

                trace_seq_printf(s, "[%s] %s:%s:%d\n",
                                 field->correct ? "correct" : "INCORRECT",
                                 field->func,
                                 field->file,
                                 field->line);
                break;
        }
        default:
                trace_seq_printf(s, "Unknown type %d\n", entry->type);
        }
@@ -1787,6 +1802,18 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
                return print_return_function(iter);
                break;
        }
        case TRACE_UNLIKELY: {
                struct trace_unlikely *field;

                trace_assign_type(field, entry);

                trace_seq_printf(s, "[%s] %s:%s:%d\n",
                                 field->correct ? "correct" : "INCORRECT",
                                 field->func,
                                 field->file,
                                 field->line);
                break;
        }
        }
        return TRACE_TYPE_HANDLED;
}
@@ -2592,6 +2619,7 @@ static int tracing_set_tracer(char *buf)
        if (t == current_trace)
                goto out;

        trace_unlikely_disable();
        if (current_trace && current_trace->reset)
                current_trace->reset(tr);
@@ -2599,6 +2627,7 @@ static int tracing_set_tracer(char *buf)
        if (t->init)
                t->init(tr);

        trace_unlikely_enable(tr);
 out:
        mutex_unlock(&trace_types_lock);
......
kernel/trace/trace.h:
@@ -22,6 +22,7 @@ enum trace_type {
        TRACE_SPECIAL,
        TRACE_MMIO_RW,
        TRACE_MMIO_MAP,
        TRACE_UNLIKELY,
        TRACE_BOOT_CALL,
        TRACE_BOOT_RET,
        TRACE_FN_RET,
@@ -134,6 +135,16 @@ struct trace_boot_ret {
        struct boot_trace_ret boot_ret;
};

#define TRACE_FUNC_SIZE 30
#define TRACE_FILE_SIZE 20
struct trace_unlikely {
        struct trace_entry      ent;
        unsigned                line;
        char                    func[TRACE_FUNC_SIZE+1];
        char                    file[TRACE_FILE_SIZE+1];
        char                    correct;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
@@ -236,6 +247,7 @@ extern void __ftrace_bad_type(void);
                          TRACE_MMIO_MAP);                              \
                IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
                IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
                IF_ASSIGN(var, ent, struct trace_unlikely, TRACE_UNLIKELY); \
                IF_ASSIGN(var, ent, struct ftrace_ret_entry, TRACE_FN_RET);\
                __ftrace_bad_type();                                    \
        } while (0)
@@ -456,6 +468,9 @@ enum trace_iterator_flags {
        TRACE_ITER_SCHED_TREE   = 0x200,
        TRACE_ITER_PRINTK       = 0x400,
        TRACE_ITER_PREEMPTONLY  = 0x800,
#ifdef CONFIG_UNLIKELY_TRACER
        TRACE_ITER_UNLIKELY     = 0x1000,
#endif
};

/*
@@ -515,4 +530,28 @@ static inline void ftrace_preempt_enable(int resched)
        preempt_enable_notrace();
}

#ifdef CONFIG_UNLIKELY_TRACER
extern int enable_unlikely_tracing(struct trace_array *tr);
extern void disable_unlikely_tracing(void);
static inline int trace_unlikely_enable(struct trace_array *tr)
{
        if (trace_flags & TRACE_ITER_UNLIKELY)
                return enable_unlikely_tracing(tr);
        return 0;
}
static inline void trace_unlikely_disable(void)
{
        /* due to races, always disable */
        disable_unlikely_tracing();
}
#else
static inline int trace_unlikely_enable(struct trace_array *tr)
{
        return 0;
}
static inline void trace_unlikely_disable(void)
{
}
#endif /* CONFIG_UNLIKELY_TRACER */

#endif /* _LINUX_KERNEL_TRACE_H */
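Worth noting how the "unlikely" string and TRACE_ITER_UNLIKELY stay in sync: in this era's trace.c, iter_ctrl option strings map to trace_flags bits by position, so the string at index i in trace_options[] toggles bit (1 << i). That is why both must sit under the same #ifdef. A small illustrative sketch of that lookup (not the kernel's code, just the idea):

    #include <string.h>

    /* Illustrative only: mirrors how iter_ctrl parsing pairs
     * trace_options[] strings with trace_flags bits by array index. */
    static unsigned long flag_for_option(const char *opt, const char **options)
    {
            int i;

            for (i = 0; options[i]; i++) {
                    if (strcmp(opt, options[i]) == 0)
                            return 1UL << i;        /* index i -> bit i */
            }
            return 0;       /* unknown option */
    }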
kernel/trace/trace_unlikely.c:
@@ -15,8 +15,122 @@
#include <asm/local.h>
#include "trace.h"

#ifdef CONFIG_UNLIKELY_TRACER

static int unlikely_tracing_enabled __read_mostly;
static DEFINE_MUTEX(unlikely_tracing_mutex);
static struct trace_array *unlikely_tracer;
static void
probe_likely_condition(struct ftrace_likely_data *f, int val, int expect)
{
        struct trace_array *tr = unlikely_tracer;
        struct ring_buffer_event *event;
        struct trace_unlikely *entry;
        unsigned long flags, irq_flags;
        int cpu, pc;
        const char *p;

        /*
         * I would love to save just the ftrace_likely_data pointer, but
         * this code can also be used by modules. Ugly things can happen
         * if the module is unloaded, and then we go and read the
         * pointer. This is slower, but much safer.
         */

        if (unlikely(!tr))
                return;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
                goto out;

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
                                         &irq_flags);
        if (!event)
                goto out;

        pc = preempt_count();
        entry = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, flags, pc);
        entry->ent.type = TRACE_UNLIKELY;

        /* Strip off the path, only save the file */
        p = f->file + strlen(f->file);
        while (p >= f->file && *p != '/')
                p--;
        p++;

        strncpy(entry->func, f->func, TRACE_FUNC_SIZE);
        strncpy(entry->file, p, TRACE_FILE_SIZE);
        entry->func[TRACE_FUNC_SIZE] = 0;
        entry->file[TRACE_FILE_SIZE] = 0;
        entry->line = f->line;
        entry->correct = val == expect;

        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

 out:
        atomic_dec(&tr->data[cpu]->disabled);
        local_irq_restore(flags);
}
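The backwards path-stripping loop above is easy to misread, so here is the same logic as a stand-alone userspace sketch (basename_of() is a hypothetical name, not from the patch):

    #include <stdio.h>
    #include <string.h>

    /* Same loop as in probe_likely_condition(): scan back from the end
     * of the string to the last '/', then step one past it. With no '/'
     * the scan runs off the front and p + 1 lands back on the full
     * string. */
    static const char *basename_of(const char *file)
    {
            const char *p = file + strlen(file);

            while (p >= file && *p != '/')
                    p--;
            return p + 1;
    }

    int main(void)
    {
            printf("%s\n", basename_of("kernel/sched_fair.c")); /* sched_fair.c */
            printf("%s\n", basename_of("sched.c"));              /* sched.c */
            return 0;
    }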
static inline
void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect)
{
        if (!unlikely_tracing_enabled)
                return;

        probe_likely_condition(f, val, expect);
}
int enable_unlikely_tracing(struct trace_array *tr)
{
        int ret = 0;

        mutex_lock(&unlikely_tracing_mutex);
        unlikely_tracer = tr;
        /*
         * Must be seen before enabling. The reader is a condition
         * where we do not need a matching rmb()
         */
        smp_wmb();
        unlikely_tracing_enabled++;
        mutex_unlock(&unlikely_tracing_mutex);

        return ret;
}
void disable_unlikely_tracing(void)
{
        mutex_lock(&unlikely_tracing_mutex);

        if (!unlikely_tracing_enabled)
                goto out_unlock;

        unlikely_tracing_enabled--;

 out_unlock:
        mutex_unlock(&unlikely_tracing_mutex);
}
#else
static inline
void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect)
{
}
#endif /* CONFIG_UNLIKELY_TRACER */
void ftrace_likely_update(struct ftrace_likely_data *f, int val, int expect)
{
        /*
         * I would love to have a trace point here instead, but the
         * trace point code is so inundated with unlikely and likely
         * conditions that the recursive nightmare that exists is too
         * much to try to get working. At least for now.
         */
        trace_likely_condition(f, val, expect);

        /* FIXME: Make this atomic! */
        if (val == expect)
                f->correct++;
......
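The FIXME above flags a real race: two CPUs hitting the same annotation can each load, increment, and store the counter, losing a count. A purely hypothetical sketch of one fix, not part of this patch, using an invented struct and helper name:

    #include <asm/atomic.h>         /* atomic_t, atomic_inc() */

    /* Hypothetical atomic counters; NOT from this patch. */
    struct likely_counts {
            atomic_t correct;
            atomic_t incorrect;
    };

    static void likely_counts_update(struct likely_counts *c, int val, int expect)
    {
            /* atomic_inc() turns the read-modify-write into a single
             * atomic operation, so concurrent hits cannot lose counts. */
            if (val == expect)
                    atomic_inc(&c->correct);
            else
                    atomic_inc(&c->incorrect);
    }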