Commit 2933d781 authored by Ingo Molnar

Merge tag 'perf-core-for-mingo' of...

Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/jolsa/perf into perf/core

Pull perf/core improvements and fixes from Jiri Olsa:

  * Factor out hists statistics counting, which in turn also
    fixes several bugs in the TUI report command (Namhyung Kim)
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
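
The series below factors the hists statistics updates into dedicated helpers (hists__reset_stats(), hists__inc_stats() and their *_filter_stats counterparts) and keeps the filtered and non-filtered counters strictly separate. A minimal, self-contained sketch of that accounting pattern follows; the types here are simplified stand-ins, not the real perf structures:

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for perf's struct hists / struct hist_entry. */
struct stats { uint64_t total_period, total_non_filtered_period; };
struct entry { uint64_t period; bool filtered; };
struct hists { uint64_t nr_entries, nr_non_filtered_entries; struct stats stats; };

/* Reset everything at the start of an output resort. */
static void reset_stats(struct hists *h)
{
	h->nr_entries = 0;
	h->nr_non_filtered_entries = 0;
	h->stats.total_period = 0;
	h->stats.total_non_filtered_period = 0;
}

/* Count an entry when it is inserted into the output tree; filtered
 * entries contribute to the totals but not to the non-filtered counters. */
static void inc_stats(struct hists *h, const struct entry *e)
{
	if (!e->filtered) {
		h->nr_non_filtered_entries++;
		h->stats.total_non_filtered_period += e->period;
	}
	h->nr_entries++;
	h->stats.total_period += e->period;
}

int main(void)
{
	struct hists h;
	struct entry a = { .period = 300, .filtered = false };
	struct entry b = { .period = 100, .filtered = true  };

	reset_stats(&h);
	inc_stats(&h, &a);
	inc_stats(&h, &b);

	/* Prints "2 400 1 300": both entries and periods are counted,
	 * but only the unfiltered one shows up in the non-filtered stats. */
	printf("%" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
	       h.nr_entries, h.stats.total_period,
	       h.nr_non_filtered_entries, h.stats.total_non_filtered_period);
	return 0;
}
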
@@ -46,7 +46,7 @@ struct perf_annotate {
};
static int perf_evsel__add_sample(struct perf_evsel *evsel,
struct perf_sample *sample,
struct perf_sample *sample __maybe_unused,
struct addr_location *al,
struct perf_annotate *ann)
{
@@ -70,7 +70,6 @@ static int perf_evsel__add_sample(struct perf_evsel *evsel,
return -ENOMEM;
ret = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
evsel->hists.stats.total_period += sample->period;
hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
return ret;
}
......
@@ -341,11 +341,16 @@ static int diff__process_sample_event(struct perf_tool *tool __maybe_unused,
return -1;
}
if (al.filtered == 0) {
evsel->hists.stats.total_non_filtered_period += sample->period;
evsel->hists.nr_non_filtered_entries++;
}
/*
* The total_period is updated here before going to the output
* tree since normally only the baseline hists will call
* hists__output_resort() and precompute needs the total
* period in order to sort entries by percentage delta.
*/
evsel->hists.stats.total_period += sample->period;
if (!al.filtered)
evsel->hists.stats.total_non_filtered_period += sample->period;
return 0;
}
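
In perf diff only the baseline hists normally go through hists__output_resort(), which is why the comment above keeps total_period current at sample time: the precomputed delta used for sorting is the difference of two period ratios, each taken against its own total. A rough, self-contained illustration of that dependency (simplified names and made-up numbers, not the actual builtin-diff code):

#include <stdio.h>

/* Period as a percentage of its hists' total_period. */
static double percent(unsigned long long period, unsigned long long total)
{
	return total ? period * 100.0 / total : 0.0;
}

int main(void)
{
	/* Hypothetical periods for one symbol in the baseline and new data. */
	unsigned long long base_period = 200, base_total = 1000;	/* 20% */
	unsigned long long new_period  = 150, new_total  = 500;	/* 30% */

	/* Without an up-to-date total_period the ratios, and therefore the
	 * sort order by delta, would be wrong. */
	double delta = percent(new_period, new_total) -
		       percent(base_period, base_total);

	printf("delta: %+.2f%%\n", delta);	/* delta: +10.00% */
	return 0;
}
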
@@ -573,10 +578,7 @@ static void hists__compute_resort(struct hists *hists)
hists->entries = RB_ROOT;
next = rb_first(root);
hists->nr_entries = 0;
hists->nr_non_filtered_entries = 0;
hists->stats.total_period = 0;
hists->stats.total_non_filtered_period = 0;
hists__reset_stats(hists);
hists__reset_col_len(hists);
while (next != NULL) {
@@ -586,7 +588,10 @@ static void hists__compute_resort(struct hists *hists)
next = rb_next(&he->rb_node_in);
insert_hist_entry_by_compute(&hists->entries, he, compute);
hists__inc_nr_entries(hists, he);
hists__inc_stats(hists, he);
if (!he->filtered)
hists__calc_col_len(hists, he);
}
}
......
@@ -57,6 +57,7 @@ struct report {
const char *cpu_list;
const char *symbol_filter_str;
float min_percent;
u64 nr_entries;
DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
};
@@ -75,6 +76,27 @@ static int report__config(const char *var, const char *value, void *cb)
return perf_default_config(var, value, cb);
}
static void report__inc_stats(struct report *rep, struct hist_entry *he)
{
/*
* The @he is either of a newly created one or an existing one
* merging current sample. We only want to count a new one so
* checking ->nr_events being 1.
*/
if (he->stat.nr_events == 1)
rep->nr_entries++;
/*
* Only counts number of samples at this stage as it's more
* natural to do it here and non-sample events are also
* counted in perf_session_deliver_event(). The dump_trace
* requires this info is ready before going to the output tree.
*/
hists__inc_nr_events(he->hists, PERF_RECORD_SAMPLE);
if (!he->filtered)
he->hists->stats.nr_non_filtered_samples++;
}
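
report__inc_stats() above relies on the fact that merging a sample into an existing hist_entry bumps its event count (presumably via he_stat__add_period()), so nr_events == 1 only on the very first sample of a newly created entry. A tiny stand-alone sketch of that "count the entry only once" idea, with simplified types for illustration:

#include <stdio.h>

struct he_stat { unsigned int nr_events; };
struct hist_entry { struct he_stat stat; };

static unsigned long long nr_entries;

/* Model of adding one sample to an entry: the event count always grows,
 * but the entry itself is counted only the first time. */
static void add_sample(struct hist_entry *he)
{
	he->stat.nr_events++;
	if (he->stat.nr_events == 1)
		nr_entries++;
}

int main(void)
{
	struct hist_entry he = { { 0 } };

	add_sample(&he);	/* new entry  -> counted */
	add_sample(&he);	/* merged hit -> not counted again */
	add_sample(&he);

	printf("%llu\n", nr_entries);	/* 1 */
	return 0;
}
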
static int report__add_mem_hist_entry(struct report *rep, struct addr_location *al,
struct perf_sample *sample, struct perf_evsel *evsel)
{
@@ -121,10 +143,8 @@ static int report__add_mem_hist_entry(struct report *rep, struct addr_location *
goto out;
}
evsel->hists.stats.total_period += cost;
hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
if (!he->filtered)
evsel->hists.stats.nr_non_filtered_samples++;
report__inc_stats(rep, he);
err = hist_entry__append_callchain(he, sample);
out:
return err;
@@ -175,11 +195,7 @@ static int report__add_branch_hist_entry(struct report *rep, struct addr_locatio
if (err)
goto out;
}
evsel->hists.stats.total_period += 1;
hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
if (!he->filtered)
evsel->hists.stats.nr_non_filtered_samples++;
report__inc_stats(rep, he);
} else
goto out;
}
@@ -212,10 +228,8 @@ static int report__add_hist_entry(struct report *rep, struct perf_evsel *evsel,
if (ui__has_annotation())
err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
evsel->hists.stats.total_period += sample->period;
if (!he->filtered)
evsel->hists.stats.nr_non_filtered_samples++;
hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
report__inc_stats(rep, he);
out:
return err;
}
@@ -486,24 +500,12 @@ static int report__browse_hists(struct report *rep)
return ret;
}
static u64 report__collapse_hists(struct report *rep)
static void report__collapse_hists(struct report *rep)
{
struct ui_progress prog;
struct perf_evsel *pos;
u64 nr_samples = 0;
/*
* Count number of histogram entries to use when showing progress,
* reusing nr_samples variable.
*/
evlist__for_each(rep->session->evlist, pos)
nr_samples += pos->hists.nr_entries;
ui_progress__init(&prog, nr_samples, "Merging related events...");
/*
* Count total number of samples, will be used to check if this
* session had any.
*/
nr_samples = 0;
ui_progress__init(&prog, rep->nr_entries, "Merging related events...");
evlist__for_each(rep->session->evlist, pos) {
struct hists *hists = &pos->hists;
@@ -512,7 +514,6 @@ static u64 report__collapse_hists(struct report *rep)
hists->symbol_filter_str = rep->symbol_filter_str;
hists__collapse_resort(hists, &prog);
nr_samples += hists->stats.nr_events[PERF_RECORD_SAMPLE];
/* Non-group events are considered as leader */
if (symbol_conf.event_group &&
@@ -525,14 +526,11 @@ static u64 report__collapse_hists(struct report *rep)
}
ui_progress__finish();
return nr_samples;
}
static int __cmd_report(struct report *rep)
{
int ret;
u64 nr_samples;
struct perf_session *session = rep->session;
struct perf_evsel *pos;
struct perf_data_file *file = session->file;
@@ -572,12 +570,12 @@ static int __cmd_report(struct report *rep)
}
}
nr_samples = report__collapse_hists(rep);
report__collapse_hists(rep);
if (session_done())
return 0;
if (nr_samples == 0) {
if (rep->nr_entries == 0) {
ui__error("The %s file has no samples!\n", file->path);
return 0;
}
......
@@ -26,13 +26,36 @@ struct hist_browser {
int print_seq;
bool show_dso;
float min_pcnt;
u64 nr_pcnt_entries;
u64 nr_non_filtered_entries;
u64 nr_callchain_rows;
};
extern void hist_browser__init_hpp(void);
static int hists__browser_title(struct hists *hists, char *bf, size_t size,
const char *ev_name);
static void hist_browser__update_nr_entries(struct hist_browser *hb);
static struct rb_node *hists__filter_entries(struct rb_node *nd,
struct hists *hists,
float min_pcnt);
static bool hist_browser__has_filter(struct hist_browser *hb)
{
return hists__has_filter(hb->hists) || hb->min_pcnt;
}
static u32 hist_browser__nr_entries(struct hist_browser *hb)
{
u32 nr_entries;
if (hist_browser__has_filter(hb))
nr_entries = hb->nr_non_filtered_entries;
else
nr_entries = hb->hists->nr_entries;
return nr_entries + hb->nr_callchain_rows;
}
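
hist_browser__nr_entries() picks its base count depending on whether any filter is active (thread/dso/symbol filter or a min_pcnt threshold) and always adds the expanded callchain rows on top. With illustrative numbers, not taken from the patch:

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the scrollable row count computed above. */
int main(void)
{
	unsigned int nr_entries = 100;		/* all hist entries          */
	unsigned int nr_non_filtered = 40;	/* entries passing the filter */
	unsigned int callchain_rows = 12;	/* currently expanded rows    */
	bool has_filter = true;

	unsigned int rows = (has_filter ? nr_non_filtered : nr_entries)
			  + callchain_rows;

	printf("%u\n", rows);	/* 52 */
	return 0;
}
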
static void hist_browser__refresh_dimensions(struct hist_browser *browser)
{
@@ -43,7 +66,14 @@ static void hist_browser__refresh_dimensions(struct hist_browser *browser)
static void hist_browser__reset(struct hist_browser *browser)
{
browser->b.nr_entries = browser->hists->nr_entries;
/*
* The hists__remove_entry_filter() already folds non-filtered
* entries so we can assume it has 0 callchain rows.
*/
browser->nr_callchain_rows = 0;
hist_browser__update_nr_entries(browser);
browser->b.nr_entries = hist_browser__nr_entries(browser);
hist_browser__refresh_dimensions(browser);
ui_browser__reset_index(&browser->b);
}
@@ -198,14 +228,16 @@ static bool hist_browser__toggle_fold(struct hist_browser *browser)
struct hist_entry *he = browser->he_selection;
hist_entry__init_have_children(he);
browser->hists->nr_entries -= he->nr_rows;
browser->b.nr_entries -= he->nr_rows;
browser->nr_callchain_rows -= he->nr_rows;
if (he->ms.unfolded)
he->nr_rows = callchain__count_rows(&he->sorted_chain);
else
he->nr_rows = 0;
browser->hists->nr_entries += he->nr_rows;
browser->b.nr_entries = browser->hists->nr_entries;
browser->b.nr_entries += he->nr_rows;
browser->nr_callchain_rows += he->nr_rows;
return true;
}
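
The browser now tracks expanded callchain rows in its own nr_callchain_rows counter instead of folding them into hists->nr_entries, so toggling the fold on the selected entry only adjusts the browser-side counters. A minimal model of that bookkeeping, with made-up numbers and simplified types:

#include <stdbool.h>
#include <stdio.h>

struct browser {
	unsigned long long nr_entries;		/* rows the UI can scroll  */
	unsigned long long nr_callchain_rows;	/* expanded callchain rows */
};

static void toggle_fold(struct browser *b, unsigned int *he_nr_rows,
			bool unfold, unsigned int callchain_rows)
{
	b->nr_entries	     -= *he_nr_rows;
	b->nr_callchain_rows -= *he_nr_rows;

	*he_nr_rows = unfold ? callchain_rows : 0;

	b->nr_entries	     += *he_nr_rows;
	b->nr_callchain_rows += *he_nr_rows;
}

int main(void)
{
	struct browser b = { .nr_entries = 50, .nr_callchain_rows = 0 };
	unsigned int he_rows = 0;

	toggle_fold(&b, &he_rows, true, 8);	/* expand: 50 -> 58 rows */
	toggle_fold(&b, &he_rows, false, 8);	/* fold:   58 -> 50 rows */

	printf("%llu %llu\n", b.nr_entries, b.nr_callchain_rows);	/* 50 0 */
	return 0;
}
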
@@ -280,23 +312,27 @@ static void hist_entry__set_folding(struct hist_entry *he, bool unfold)
he->nr_rows = 0;
}
static void hists__set_folding(struct hists *hists, bool unfold)
static void
__hist_browser__set_folding(struct hist_browser *browser, bool unfold)
{
struct rb_node *nd;
struct hists *hists = browser->hists;
hists->nr_entries = 0;
for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
for (nd = rb_first(&hists->entries);
(nd = hists__filter_entries(nd, hists, browser->min_pcnt)) != NULL;
nd = rb_next(nd)) {
struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
hist_entry__set_folding(he, unfold);
hists->nr_entries += 1 + he->nr_rows;
browser->nr_callchain_rows += he->nr_rows;
}
}
static void hist_browser__set_folding(struct hist_browser *browser, bool unfold)
{
hists__set_folding(browser->hists, unfold);
browser->b.nr_entries = browser->hists->nr_entries;
browser->nr_callchain_rows = 0;
__hist_browser__set_folding(browser, unfold);
browser->b.nr_entries = hist_browser__nr_entries(browser);
/* Go to the start, we may be way after valid entries after a collapse */
ui_browser__reset_index(&browser->b);
}
@@ -310,8 +346,6 @@ static void ui_browser__warn_lost_events(struct ui_browser *browser)
"Or reduce the sampling frequency.");
}
static void hist_browser__update_pcnt_entries(struct hist_browser *hb);
static int hist_browser__run(struct hist_browser *browser, const char *ev_name,
struct hist_browser_timer *hbt)
{
@@ -320,9 +354,7 @@ static int hist_browser__run(struct hist_browser *browser, const char *ev_name,
int delay_secs = hbt ? hbt->refresh : 0;
browser->b.entries = &browser->hists->entries;
browser->b.nr_entries = browser->hists->nr_entries;
if (browser->min_pcnt)
browser->b.nr_entries = browser->nr_pcnt_entries;
browser->b.nr_entries = hist_browser__nr_entries(browser);
hist_browser__refresh_dimensions(browser);
hists__browser_title(browser->hists, title, sizeof(title), ev_name);
@@ -339,13 +371,10 @@ static int hist_browser__run(struct hist_browser *browser, const char *ev_name,
u64 nr_entries;
hbt->timer(hbt->arg);
if (browser->min_pcnt) {
hist_browser__update_pcnt_entries(browser);
nr_entries = browser->nr_pcnt_entries;
} else {
nr_entries = browser->hists->nr_entries;
}
if (hist_browser__has_filter(browser))
hist_browser__update_nr_entries(browser);
nr_entries = hist_browser__nr_entries(browser);
ui_browser__update_nr_entries(&browser->b, nr_entries);
if (browser->hists->stats.nr_lost_warned !=
@@ -1343,18 +1372,23 @@ static int switch_data_file(void)
return ret;
}
static void hist_browser__update_pcnt_entries(struct hist_browser *hb)
static void hist_browser__update_nr_entries(struct hist_browser *hb)
{
u64 nr_entries = 0;
struct rb_node *nd = rb_first(&hb->hists->entries);
while (nd) {
if (hb->min_pcnt == 0) {
hb->nr_non_filtered_entries = hb->hists->nr_non_filtered_entries;
return;
}
while ((nd = hists__filter_entries(nd, hb->hists,
hb->min_pcnt)) != NULL) {
nr_entries++;
nd = hists__filter_entries(rb_next(nd), hb->hists,
hb->min_pcnt);
nd = rb_next(nd);
}
hb->nr_pcnt_entries = nr_entries;
hb->nr_non_filtered_entries = nr_entries;
}
static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
@@ -1411,7 +1445,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
if (min_pcnt) {
browser->min_pcnt = min_pcnt;
hist_browser__update_pcnt_entries(browser);
hist_browser__update_nr_entries(browser);
}
fstack = pstack__new(2);
......
@@ -225,14 +225,18 @@ static void he_stat__decay(struct he_stat *he_stat)
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
u64 prev_period = he->stat.period;
u64 diff;
if (prev_period == 0)
return true;
he_stat__decay(&he->stat);
diff = prev_period - he->stat.period;
hists->stats.total_period -= diff;
if (!he->filtered)
hists->stats.total_period -= prev_period - he->stat.period;
hists->stats.total_non_filtered_period -= diff;
return he->stat.period == 0;
}
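
The decay path now also keeps total_non_filtered_period in sync: the decayed amount (diff) is computed once, subtracted from total_period unconditionally, and subtracted from the non-filtered total only for unfiltered entries. A small stand-alone sketch with made-up numbers; the 7/8 decay factor is an assumption about he_stat__decay(), not part of this patch:

#include <stdio.h>

int main(void)
{
	unsigned long long total_period = 10000, total_non_filtered = 8000;
	unsigned long long period = 800;	/* one entry's period */
	int filtered = 0;

	unsigned long long prev_period = period;
	unsigned long long diff;

	period = period * 7 / 8;		/* 800 -> 700 (assumed decay) */
	diff = prev_period - period;		/* 100 */

	total_period -= diff;			/* 10000 -> 9900 */
	if (!filtered)
		total_non_filtered -= diff;	/*  8000 -> 7900 */

	printf("%llu %llu %llu\n", period, total_period, total_non_filtered);
	return 0;
}
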
@@ -259,8 +263,11 @@ void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
if (sort__need_collapse)
rb_erase(&n->rb_node_in, &hists->entries_collapsed);
hist_entry__free(n);
--hists->nr_entries;
if (!n->filtered)
--hists->nr_non_filtered_entries;
hist_entry__free(n);
}
}
}
@@ -317,17 +324,6 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template)
return he;
}
void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
if (!h->filtered) {
hists__calc_col_len(hists, h);
hists->nr_non_filtered_entries++;
hists->stats.total_non_filtered_period += h->stat.period;
}
hists->nr_entries++;
hists->stats.total_period += h->stat.period;
}
static u8 symbol__parent_filter(const struct symbol *parent)
{
if (symbol_conf.exclude_other && parent == NULL)
@@ -393,7 +389,6 @@ static struct hist_entry *add_hist_entry(struct hists *hists,
if (!he)
return NULL;
hists->nr_entries++;
rb_link_node(&he->rb_node_in, parent, p);
rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
@@ -633,6 +628,35 @@ static int hist_entry__sort_on_period(struct hist_entry *a,
return ret;
}
static void hists__reset_filter_stats(struct hists *hists)
{
hists->nr_non_filtered_entries = 0;
hists->stats.total_non_filtered_period = 0;
}
void hists__reset_stats(struct hists *hists)
{
hists->nr_entries = 0;
hists->stats.total_period = 0;
hists__reset_filter_stats(hists);
}
static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
hists->nr_non_filtered_entries++;
hists->stats.total_non_filtered_period += h->stat.period;
}
void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
if (!h->filtered)
hists__inc_filter_stats(hists, h);
hists->nr_entries++;
hists->stats.total_period += h->stat.period;
}
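
hists__reset_filter_stats() and hists__inc_filter_stats() are the filter-only halves of the helpers above: the filter paths (hists__filter_by_dso/thread/symbol and hists__remove_entry_filter()) reset and rebuild only the non-filtered counters, leaving the output tree's nr_entries and total_period untouched. A toy sketch of that split, using simplified stand-in fields and made-up numbers:

#include <stdio.h>

struct hists {
	unsigned long long nr_entries, total_period;
	unsigned long long nr_non_filtered_entries, total_non_filtered_period;
};

static void reset_filter_stats(struct hists *h)
{
	h->nr_non_filtered_entries = 0;
	h->total_non_filtered_period = 0;
}

static void inc_filter_stats(struct hists *h, unsigned long long period)
{
	h->nr_non_filtered_entries++;
	h->total_non_filtered_period += period;
}

int main(void)
{
	struct hists h = { .nr_entries = 3, .total_period = 600 };

	/* e.g. a dso filter keeps two of the three entries */
	reset_filter_stats(&h);
	inc_filter_stats(&h, 300);
	inc_filter_stats(&h, 200);

	printf("%llu/%llu entries, %llu/%llu period\n",
	       h.nr_non_filtered_entries, h.nr_entries,
	       h.total_non_filtered_period, h.total_period);	/* 2/3, 500/600 */
	return 0;
}
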
static void __hists__insert_output_entry(struct rb_root *entries,
struct hist_entry *he,
u64 min_callchain_hits)
@@ -676,9 +700,7 @@ void hists__output_resort(struct hists *hists)
next = rb_first(root);
hists->entries = RB_ROOT;
hists->nr_non_filtered_entries = 0;
hists->stats.total_period = 0;
hists->stats.total_non_filtered_period = 0;
hists__reset_stats(hists);
hists__reset_col_len(hists);
while (next) {
@@ -686,7 +708,10 @@ void hists__output_resort(struct hists *hists)
next = rb_next(&n->rb_node_in);
__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
hists__inc_nr_entries(hists, n);
hists__inc_stats(hists, n);
if (!n->filtered)
hists__calc_col_len(hists, n);
}
}
@@ -697,13 +722,13 @@ static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h
if (h->filtered)
return;
++hists->nr_non_filtered_entries;
if (h->ms.unfolded)
hists->nr_non_filtered_entries += h->nr_rows;
/* force fold unfiltered entry for simplicity */
h->ms.unfolded = false;
h->row_offset = 0;
hists->stats.total_non_filtered_period += h->stat.period;
hists->stats.nr_non_filtered_samples += h->stat.nr_events;
hists__inc_filter_stats(hists, h);
hists__calc_col_len(hists, h);
}
@@ -724,9 +749,9 @@ void hists__filter_by_dso(struct hists *hists)
{
struct rb_node *nd;
hists->nr_non_filtered_entries = 0;
hists->stats.total_non_filtered_period = 0;
hists->stats.nr_non_filtered_samples = 0;
hists__reset_filter_stats(hists);
hists__reset_col_len(hists);
for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
@@ -758,9 +783,9 @@ void hists__filter_by_thread(struct hists *hists)
{
struct rb_node *nd;
hists->nr_non_filtered_entries = 0;
hists->stats.total_non_filtered_period = 0;
hists->stats.nr_non_filtered_samples = 0;
hists__reset_filter_stats(hists);
hists__reset_col_len(hists);
for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
@@ -790,9 +815,9 @@ void hists__filter_by_symbol(struct hists *hists)
{
struct rb_node *nd;
hists->nr_non_filtered_entries = 0;
hists->stats.total_non_filtered_period = 0;
hists->stats.nr_non_filtered_samples = 0;
hists__reset_filter_stats(hists);
hists__reset_col_len(hists);
for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
@@ -853,7 +878,7 @@ static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
he->hists = hists;
rb_link_node(&he->rb_node_in, parent, p);
rb_insert_color(&he->rb_node_in, root);
hists__inc_nr_entries(hists, he);
hists__inc_stats(hists, he);
he->dummy = true;
}
out:
......
@@ -116,7 +116,8 @@ void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel);
void hists__output_recalc_col_len(struct hists *hists, int max_rows);
u64 hists__total_period(struct hists *hists);
void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h);
void hists__reset_stats(struct hists *hists);
void hists__inc_stats(struct hists *hists, struct hist_entry *h);
void hists__inc_nr_events(struct hists *hists, u32 type);
void events_stats__inc(struct events_stats *stats, u32 type);
size_t events_stats__fprintf(struct events_stats *stats, FILE *fp);
@@ -128,6 +129,12 @@ void hists__filter_by_dso(struct hists *hists);
void hists__filter_by_thread(struct hists *hists);
void hists__filter_by_symbol(struct hists *hists);
static inline bool hists__has_filter(struct hists *hists)
{
return hists->thread_filter || hists->dso_filter ||
hists->symbol_filter_str;
}
u16 hists__col_len(struct hists *hists, enum hist_column col);
void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len);
bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len);
......