Commit 7a13aa28 authored by Namhyung Kim, committed by Jiri Olsa

perf hists: Accumulate hist entry stat based on the callchain

Call __hists__add_entry() for each callchain node to get an
accumulated stat for an entry.  Introduce new cumulative_iter ops to
process them properly.
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Tested-by: Arun Sharma <asharma@fb.com>
Tested-by: Rodrigo Campos <rodrigo@sdfg.com.ar>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Link: http://lkml.kernel.org/r/1401335910-16832-6-git-send-email-namhyung@kernel.org
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Parent a0b51af3
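For orientation before the diff: the hist_entry_iter core drives these callbacks in a fixed order. Below is a simplified, illustrative sketch (not part of this commit's diff) of that driver loop, assuming the perf-internal types that appear in the diff; the function name is made up for illustration, and the real hist_entry_iter__add() additionally resolves the callchain and handles errors more carefully.

/*
 * Illustrative sketch only -- simplified from the shape of
 * hist_entry_iter__add(); shown to clarify how the new
 * hist_iter_cumulative callbacks fit together.
 */
static int hist_entry_iter__add_sketch(struct hist_entry_iter *iter,
				       struct addr_location *al,
				       struct perf_evsel *evsel,
				       struct perf_sample *sample)
{
	int err = 0, err2;

	iter->evsel = evsel;
	iter->sample = sample;

	/* cumulative: commits the callchain cursor for the walk below */
	err = iter->ops->prepare_entry(iter, al);
	if (err)
		return err;

	/* cumulative: adds the hist entry for the sampled location itself */
	err = iter->ops->add_single_entry(iter, al);
	if (err)
		return err;

	/*
	 * cumulative: next_entry() resolves the next callchain node into
	 * *al and returns 1, or 0 once the chain is exhausted; each node
	 * then gets an accumulated entry via add_next_entry().
	 */
	while (iter->ops->next_entry(iter, al) > 0) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;
	}

	/* always let the ops reset their state (iter->he etc.) */
	err2 = iter->ops->finish_entry(iter, al);
	return err ? err : err2;
}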
@@ -115,6 +115,8 @@ static int process_sample_event(struct perf_tool *tool,
 		iter.ops = &hist_iter_branch;
 	else if (rep->mem_mode)
 		iter.ops = &hist_iter_mem;
+	else if (symbol_conf.cumulate_callchain)
+		iter.ops = &hist_iter_cumulative;
 	else
 		iter.ops = &hist_iter_normal;
...
@@ -616,7 +616,8 @@ int sample__resolve_callchain(struct perf_sample *sample, struct symbol **parent
 	if (sample->callchain == NULL)
 		return 0;
 
-	if (symbol_conf.use_callchain || sort__has_parent) {
+	if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain ||
+	    sort__has_parent) {
 		return machine__resolve_callchain(al->machine, evsel, al->thread,
 						  sample, parent, al, max_stack);
 	}
...
@@ -696,6 +696,94 @@ iter_finish_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
 	return hist_entry__append_callchain(he, sample);
 }
 
+static int
+iter_prepare_cumulative_entry(struct hist_entry_iter *iter __maybe_unused,
+			      struct addr_location *al __maybe_unused)
+{
+	callchain_cursor_commit(&callchain_cursor);
+	return 0;
+}
+
+static int
+iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
+				 struct addr_location *al)
+{
+	struct perf_evsel *evsel = iter->evsel;
+	struct perf_sample *sample = iter->sample;
+	struct hist_entry *he;
+	int err = 0;
+
+	he = __hists__add_entry(&evsel->hists, al, iter->parent, NULL, NULL,
+				sample->period, sample->weight,
+				sample->transaction, true);
+	if (he == NULL)
+		return -ENOMEM;
+
+	iter->he = he;
+
+	/*
+	 * The iter->he will be over-written after ->add_next_entry()
+	 * called so inc stats for the original entry now.
+	 */
+	if (ui__has_annotation())
+		err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
+
+	hists__inc_nr_samples(&evsel->hists, he->filtered);
+
+	return err;
+}
+
+static int
+iter_next_cumulative_entry(struct hist_entry_iter *iter,
+			   struct addr_location *al)
+{
+	struct callchain_cursor_node *node;
+
+	node = callchain_cursor_current(&callchain_cursor);
+	if (node == NULL)
+		return 0;
+
+	al->map = node->map;
+	al->sym = node->sym;
+	if (node->map)
+		al->addr = node->map->map_ip(node->map, node->ip);
+	else
+		al->addr = node->ip;
+
+	if (iter->hide_unresolved && al->sym == NULL)
+		return 0;
+
+	callchain_cursor_advance(&callchain_cursor);
+	return 1;
+}
+
+static int
+iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
+			       struct addr_location *al)
+{
+	struct perf_evsel *evsel = iter->evsel;
+	struct perf_sample *sample = iter->sample;
+	struct hist_entry *he;
+
+	he = __hists__add_entry(&evsel->hists, al, iter->parent, NULL, NULL,
+				sample->period, sample->weight,
+				sample->transaction, false);
+	if (he == NULL)
+		return -ENOMEM;
+
+	iter->he = he;
+	return 0;
+}
+
+static int
+iter_finish_cumulative_entry(struct hist_entry_iter *iter,
+			     struct addr_location *al __maybe_unused)
+{
+	iter->he = NULL;
+	return 0;
+}
+
 const struct hist_iter_ops hist_iter_mem = {
 	.prepare_entry = iter_prepare_mem_entry,
 	.add_single_entry = iter_add_single_mem_entry,
@@ -720,6 +808,14 @@ const struct hist_iter_ops hist_iter_normal = {
 	.finish_entry = iter_finish_normal_entry,
 };
+
+const struct hist_iter_ops hist_iter_cumulative = {
+	.prepare_entry = iter_prepare_cumulative_entry,
+	.add_single_entry = iter_add_single_cumulative_entry,
+	.next_entry = iter_next_cumulative_entry,
+	.add_next_entry = iter_add_next_cumulative_entry,
+	.finish_entry = iter_finish_cumulative_entry,
+};
 
 int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
 			 struct perf_evsel *evsel, struct perf_sample *sample,
 			 int max_stack_depth)
...
@@ -124,6 +124,7 @@ struct hist_entry_iter {
 extern const struct hist_iter_ops hist_iter_normal;
 extern const struct hist_iter_ops hist_iter_branch;
 extern const struct hist_iter_ops hist_iter_mem;
+extern const struct hist_iter_ops hist_iter_cumulative;
 
 struct hist_entry *__hists__add_entry(struct hists *hists,
 				      struct addr_location *al,
...