Commit 67d61296, authored by Ingo Molnar

Merge tag 'perf-core-for-mingo-20160419' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

Build fixes:

- Fix 'perf trace' build when DWARF unwind isn't available (Arnaldo Carvalho de Melo)

- Remove x86 references from arch-neutral Build, fixing it in !x86 arches,
  reported as breaking the build for powerpc64le in linux-next (Arnaldo Carvalho de Melo)

Infrastructure changes:

- Do the memset() of variable 'st' using the correct size in the jit code; a short
  illustrative sketch follows the sign-offs below (Colin Ian King)

- Fix postgresql ubuntu 'perf script' install instructions (Chris Phlipot)

- Use callchain_param more thoroughly when checking how callchains were
  configured; eventually it will be the only place to look for callchain
  parameters, see the simplified sketch after the sign-offs below
  (Arnaldo Carvalho de Melo)

- Fix some issues in the 'perf test kallsyms' entry (Arnaldo Carvalho de Melo)

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
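
Among the changes below, the util/jitdump.c hunks fix a classic C mistake: the code
called memset(&st, 0, sizeof(stat)), where 'stat' names the libc function rather than
the local 'struct stat st'. gcc and clang accept sizeof on a function designator as an
extension and evaluate it to 1, so only one byte of the structure was being cleared.
The following standalone sketch is not taken from the perf sources; it only
illustrates the pitfall:

    /*
     * Illustration of the sizeof() pitfall fixed in util/jitdump.c.
     * Not perf code. 'stat' inside sizeof() refers to the libc function,
     * which gcc/clang evaluate to size 1 as an extension.
     */
    #include <stdio.h>
    #include <string.h>
    #include <sys/stat.h>

    int main(void)
    {
            struct stat st;

            memset(&st, 0xff, sizeof(st));   /* dirty the whole struct         */
            memset(&st, 0, sizeof(stat));    /* old, buggy form: clears 1 byte */
            printf("sizeof(stat) = %zu, sizeof(st) = %zu\n",
                   sizeof(stat), sizeof(st));

            memset(&st, 0, sizeof(st));      /* fixed form: clears the struct  */
            return 0;
    }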
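
Most of the remaining hunks follow a single pattern: instead of every tool keeping its
own "call-graph was requested" flag (record_opts.callgraph_set, report.dont_use_callchains),
the '-g'/'--call-graph' option callbacks now receive a pointer to the shared
callchain_param and set its 'enabled' field, which the later checks read directly. A
simplified, self-contained sketch of that pattern follows; the struct layout and the
callback signature here are hypothetical stand-ins, not the actual perf option-parsing
code:

    /*
     * Simplified sketch of the "one shared callchain_param" pattern.
     * The struct and callback are stand-ins, not the real perf definitions.
     */
    #include <stdbool.h>
    #include <stdio.h>

    struct callchain_param {
            bool enabled;
            int  record_mode;       /* e.g. CALLCHAIN_NONE / CALLCHAIN_FP */
    };

    /* one shared instance, as in tools/perf */
    static struct callchain_param callchain_param;

    /*
     * Option callback: receives the shared block via an opaque pointer,
     * much like opt->value in the patched callbacks.
     */
    static int callgraph_opt_cb(void *value, bool unset)
    {
            struct callchain_param *callchain = value;

            callchain->enabled = !unset;    /* replaces the per-tool flag */
            return 0;
    }

    int main(void)
    {
            callgraph_opt_cb(&callchain_param, false);  /* user passed -g */

            if (callchain_param.enabled)    /* every tool now checks this */
                    printf("call-graph recording requested\n");
            return 0;
    }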
@@ -946,7 +946,6 @@ int record_opts__parse_callchain(struct record_opts *record,
 				 const char *arg, bool unset)
 {
 	int ret;
-	record->callgraph_set = true;
 	callchain->enabled = !unset;
 
 	/* --no-call-graph */
@@ -978,15 +977,14 @@ int record_callchain_opt(const struct option *opt,
 			 const char *arg __maybe_unused,
 			 int unset __maybe_unused)
 {
-	struct record_opts *record = (struct record_opts *)opt->value;
+	struct callchain_param *callchain = opt->value;
 
-	record->callgraph_set = true;
-	callchain_param.enabled = true;
+	callchain->enabled = true;
 
-	if (callchain_param.record_mode == CALLCHAIN_NONE)
-		callchain_param.record_mode = CALLCHAIN_FP;
+	if (callchain->record_mode == CALLCHAIN_NONE)
+		callchain->record_mode = CALLCHAIN_FP;
 
-	callchain_debug(&callchain_param);
+	callchain_debug(callchain);
 	return 0;
 }
@@ -1224,7 +1222,7 @@ struct option __record_options[] = {
 		     record__parse_mmap_pages),
 	OPT_BOOLEAN(0, "group", &record.opts.group,
 		    "put the counters into a counter group"),
-	OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
+	OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
 			   NULL, "enables call-graph recording" ,
 			   &record_callchain_opt),
 	OPT_CALLBACK(0, "call-graph", &record.opts,
......
@@ -47,7 +47,6 @@ struct report {
 	struct perf_tool	tool;
 	struct perf_session	*session;
 	bool			use_tui, use_gtk, use_stdio;
-	bool			dont_use_callchains;
 	bool			show_full_info;
 	bool			show_threads;
 	bool			inverted_callchain;
@@ -247,7 +246,7 @@ static int report__setup_sample_type(struct report *rep)
 				 "you call 'perf record' without -g?\n");
 			return -1;
 		}
-	} else if (!rep->dont_use_callchains &&
+	} else if (!callchain_param.enabled &&
 		   callchain_param.mode != CHAIN_NONE &&
 		   !symbol_conf.use_callchain) {
 			symbol_conf.use_callchain = true;
@@ -599,13 +598,15 @@ static int __cmd_report(struct report *rep)
 static int
 report_parse_callchain_opt(const struct option *opt, const char *arg, int unset)
 {
-	struct report *rep = (struct report *)opt->value;
+	struct callchain_param *callchain = opt->value;
 
+	callchain->enabled = !unset;
 	/*
 	 * --no-call-graph
 	 */
 	if (unset) {
-		rep->dont_use_callchains = true;
+		symbol_conf.use_callchain = false;
+		callchain->mode = CHAIN_NONE;
 		return 0;
 	}
@@ -734,7 +735,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
 		   "regex filter to identify parent, see: '--sort parent'"),
 	OPT_BOOLEAN('x', "exclude-other", &symbol_conf.exclude_other,
 		    "Only display entries with parent-match"),
-	OPT_CALLBACK_DEFAULT('g', "call-graph", &report,
+	OPT_CALLBACK_DEFAULT('g', "call-graph", &callchain_param,
 			     "print_type,threshold[,print_limit],order,sort_key[,branch],value",
 			     report_callchain_help, &report_parse_callchain_opt,
 			     callchain_default_opt),
......
@@ -791,7 +791,7 @@ static void process_event(struct perf_script *script,
 	if (PRINT_FIELD(IP)) {
 		struct callchain_cursor *cursor = NULL, cursor_callchain;
 
-		if (symbol_conf.use_callchain &&
+		if (symbol_conf.use_callchain && sample->callchain &&
 		    thread__resolve_callchain(al->thread, &cursor_callchain, evsel,
 					      sample, NULL, NULL, scripting_max_stack) == 0)
 			cursor = &cursor_callchain;
......
@@ -917,15 +917,15 @@ static int perf_top__start_counters(struct perf_top *top)
 	return -1;
 }
 
-static int perf_top__setup_sample_type(struct perf_top *top __maybe_unused)
+static int callchain_param__setup_sample_type(struct callchain_param *callchain)
 {
 	if (!sort__has_sym) {
-		if (symbol_conf.use_callchain) {
+		if (callchain->enabled) {
 			ui__error("Selected -g but \"sym\" not present in --sort/-s.");
 			return -EINVAL;
 		}
-	} else if (callchain_param.mode != CHAIN_NONE) {
-		if (callchain_register_param(&callchain_param) < 0) {
+	} else if (callchain->mode != CHAIN_NONE) {
+		if (callchain_register_param(callchain) < 0) {
 			ui__error("Can't register callchain params.\n");
 			return -EINVAL;
 		}
@@ -952,7 +952,7 @@ static int __cmd_top(struct perf_top *top)
 		goto out_delete;
 	}
 
-	ret = perf_top__setup_sample_type(top);
+	ret = callchain_param__setup_sample_type(&callchain_param);
 	if (ret)
 		goto out_delete;
@@ -1045,18 +1045,17 @@ callchain_opt(const struct option *opt, const char *arg, int unset)
 static int
 parse_callchain_opt(const struct option *opt, const char *arg, int unset)
 {
-	struct record_opts *record = (struct record_opts *)opt->value;
+	struct callchain_param *callchain = opt->value;
 
-	record->callgraph_set = true;
-	callchain_param.enabled = !unset;
-	callchain_param.record_mode = CALLCHAIN_FP;
+	callchain->enabled = !unset;
+	callchain->record_mode = CALLCHAIN_FP;
 
 	/*
 	 * --no-call-graph
 	 */
 	if (unset) {
 		symbol_conf.use_callchain = false;
-		callchain_param.record_mode = CALLCHAIN_NONE;
+		callchain->record_mode = CALLCHAIN_NONE;
 		return 0;
 	}
@@ -1162,10 +1161,10 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
 		   "output field(s): overhead, period, sample plus all of sort keys"),
 	OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
 		    "Show a column with the number of samples"),
-	OPT_CALLBACK_NOOPT('g', NULL, &top.record_opts,
+	OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
 			   NULL, "enables call-graph recording and display",
 			   &callchain_opt),
-	OPT_CALLBACK(0, "call-graph", &top.record_opts,
+	OPT_CALLBACK(0, "call-graph", &callchain_param,
 		     "record_mode[,record_size],print_type,threshold[,print_limit],order,sort_key[,branch]",
 		     top_callchain_help, &parse_callchain_opt),
 	OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain,
@@ -1312,7 +1311,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
 	top.sym_evsel = perf_evlist__first(top.evlist);
 
-	if (!symbol_conf.use_callchain) {
+	if (!callchain_param.enabled) {
 		symbol_conf.cumulate_callchain = false;
 		perf_hpp__cancel_cumulate();
 	}
......
@@ -2457,7 +2457,7 @@ static int trace__add_syscall_newtp(struct trace *trace)
 	perf_evlist__add(evlist, sys_enter);
 	perf_evlist__add(evlist, sys_exit);
 
-	if (trace->opts.callgraph_set && !trace->kernel_syscallchains) {
+	if (callchain_param.enabled && !trace->kernel_syscallchains) {
 		/*
 		 * We're interested only in the user space callchain
 		 * leading to the syscall, allow overriding that for
@@ -2546,7 +2546,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
 	perf_evlist__config(evlist, &trace->opts, NULL);
 
-	if (trace->opts.callgraph_set && trace->syscalls.events.sys_exit) {
+	if (callchain_param.enabled && trace->syscalls.events.sys_exit) {
 		perf_evsel__config_callchain(trace->syscalls.events.sys_exit,
 					     &trace->opts, &callchain_param);
 		/*
@@ -3109,7 +3109,7 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
 		   "per thread proc mmap processing timeout in ms"),
 	OPT_END()
 	};
-	bool max_stack_user_set = true;
+	bool __maybe_unused max_stack_user_set = true;
 	bool mmap_pages_user_set = true;
 	const char * const trace_subcommands[] = { "record", NULL };
 	int err;
@@ -3153,11 +3153,11 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
 	}
 
 #ifdef HAVE_DWARF_UNWIND_SUPPORT
-	if ((trace.min_stack || max_stack_user_set) && !trace.opts.callgraph_set)
+	if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled)
 		record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
 #endif
 
-	if (trace.opts.callgraph_set) {
+	if (callchain_param.enabled) {
 		if (!mmap_pages_user_set && geteuid() == 0)
 			trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;
......
@@ -52,7 +52,6 @@ struct record_opts {
 	bool	     sample_weight;
 	bool	     sample_time;
 	bool	     sample_time_set;
-	bool	     callgraph_set;
 	bool	     period;
 	bool	     running_time;
 	bool	     full_auxtrace;
......
@@ -34,10 +34,9 @@ import datetime
 #
 # ubuntu:
 #
-#	$ sudo apt-get install postgresql
+#	$ sudo apt-get install postgresql python-pyside.qtsql libqt4-sql-psql
 #	$ sudo su - postgres
-#	$ createuser <your user id here>
-#	Shall the new role be a superuser? (y/n) y
+#	$ createuser -s <your user id here>
 #
 # An example of using this script with Intel PT:
 #
......
@@ -54,8 +54,14 @@ int test__vmlinux_matches_kallsyms(int subtest __maybe_unused)
 	 * Step 3:
 	 *
 	 * Load and split /proc/kallsyms into multiple maps, one per module.
+	 * Do not use kcore, as this test was designed before kcore support
+	 * and has parts that only make sense if using the non-kcore code.
+	 * XXX: extend it to stress the kcorre code as well, hint: the list
+	 * of modules extracted from /proc/kcore, in its current form, can't
+	 * be compacted against the list of modules found in the "vmlinux"
+	 * code and with the one got from /proc/modules from the "kallsyms" code.
 	 */
-	if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type, NULL) <= 0) {
+	if (__machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type, true, NULL) <= 0) {
 		pr_debug("dso__load_kallsyms ");
 		goto out;
 	}
@@ -157,6 +163,9 @@ int test__vmlinux_matches_kallsyms(int subtest __maybe_unused)
 					pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
 						 mem_start, sym->name, pair->name);
+				} else {
+					pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
+						 mem_start, sym->name, first_pair->name);
 				}
 			}
 		} else
......
@@ -1896,11 +1896,10 @@ static int hist_browser__fprintf_entry(struct hist_browser *browser,
 	bool first = true;
 	int ret;
 
-	if (symbol_conf.use_callchain)
+	if (symbol_conf.use_callchain) {
 		folded_sign = hist_entry__folded(he);
-
-	if (symbol_conf.use_callchain)
 		printed += fprintf(fp, "%c ", folded_sign);
+	}
 
 	hists__for_each_format(browser->hists, fmt) {
 		if (perf_hpp__should_skip(fmt, he->hists))
......
@@ -150,10 +150,6 @@ CFLAGS_libstring.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ET
 CFLAGS_hweight.o      += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
 CFLAGS_parse-events.o += -Wno-redundant-decls
 
-$(OUTPUT)util/syscalltbl.o: util/syscalltbl.c arch/x86/entry/syscalls/syscall_64.tbl $(OUTPUT)arch/x86/include/generated/asm/syscalls_64.c FORCE
-	$(call rule_mkdir)
-	$(call if_changed_dep,cc_o_c)
-
 $(OUTPUT)util/kallsyms.o: ../lib/symbol/kallsyms.c FORCE
 	$(call rule_mkdir)
 	$(call if_changed_dep,cc_o_c)
......
@@ -109,6 +109,7 @@ __parse_callchain_report_opt(const char *arg, bool allow_record_opt)
 	bool record_opt_set = false;
 	bool try_stack_size = false;
 
+	callchain_param.enabled = true;
 	symbol_conf.use_callchain = true;
 
 	if (!arg)
@@ -117,6 +118,7 @@ __parse_callchain_report_opt(const char *arg, bool allow_record_opt)
 	while ((tok = strtok((char *)arg, ",")) != NULL) {
 		if (!strncmp(tok, "none", strlen(tok))) {
 			callchain_param.mode = CHAIN_NONE;
+			callchain_param.enabled = false;
 			symbol_conf.use_callchain = false;
 			return 0;
 		}
......
@@ -420,7 +420,7 @@ for ((_evsel) = list_entry((_leader)->node.next, struct perf_evsel, node); \
      (_evsel) && (_evsel)->leader == (_leader);					\
      (_evsel) = list_entry((_evsel)->node.next, struct perf_evsel, node))
 
-static inline bool has_branch_callstack(struct perf_evsel *evsel)
+static inline bool perf_evsel__has_branch_callstack(const struct perf_evsel *evsel)
 {
 	return evsel->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK;
 }
......
@@ -412,7 +412,7 @@ static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
 		return -1;
 	}
 
 	if (stat(filename, &st))
-		memset(&st, 0, sizeof(stat));
+		memset(&st, 0, sizeof(st));
 
 	event->mmap2.header.type = PERF_RECORD_MMAP2;
 	event->mmap2.header.misc = PERF_RECORD_MISC_USER;
@@ -500,7 +500,7 @@ static int jit_repipe_code_move(struct jit_buf_desc *jd, union jr_entry *jr)
 	size++; /* for \0 */
 
 	if (stat(filename, &st))
-		memset(&st, 0, sizeof(stat));
+		memset(&st, 0, sizeof(st));
 
 	size = PERF_ALIGN(size, sizeof(u64));
......
@@ -908,11 +908,11 @@ int machines__create_kernel_maps(struct machines *machines, pid_t pid)
 	return machine__create_kernel_maps(machine);
 }
 
-int machine__load_kallsyms(struct machine *machine, const char *filename,
-			   enum map_type type, symbol_filter_t filter)
+int __machine__load_kallsyms(struct machine *machine, const char *filename,
+			     enum map_type type, bool no_kcore, symbol_filter_t filter)
 {
 	struct map *map = machine__kernel_map(machine);
-	int ret = dso__load_kallsyms(map->dso, filename, map, filter);
+	int ret = __dso__load_kallsyms(map->dso, filename, map, no_kcore, filter);
 
 	if (ret > 0) {
 		dso__set_loaded(map->dso, type);
@@ -927,6 +927,12 @@ int machine__load_kallsyms(struct machine *machine, const char *filename,
 	return ret;
 }
 
+int machine__load_kallsyms(struct machine *machine, const char *filename,
+			   enum map_type type, symbol_filter_t filter)
+{
+	return __machine__load_kallsyms(machine, filename, type, false, filter);
+}
+
 int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
 			       symbol_filter_t filter)
 {
@@ -1808,7 +1814,7 @@ static int thread__resolve_callchain_sample(struct thread *thread,
 	callchain_cursor_reset(cursor);
 
-	if (has_branch_callstack(evsel)) {
+	if (perf_evsel__has_branch_callstack(evsel)) {
 		err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
 						   root_al, max_stack);
 		if (err)
......
@@ -215,6 +215,8 @@ struct symbol *machine__find_kernel_function_by_name(struct machine *machine,
 struct map *machine__findnew_module_map(struct machine *machine, u64 start,
 					const char *filename);
 
+int __machine__load_kallsyms(struct machine *machine, const char *filename,
+			     enum map_type type, bool no_kcore, symbol_filter_t filter);
 int machine__load_kallsyms(struct machine *machine, const char *filename,
 			   enum map_type type, symbol_filter_t filter);
 int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
......
@@ -907,7 +907,7 @@ static void callchain__printf(struct perf_evsel *evsel,
 	unsigned int i;
 	struct ip_callchain *callchain = sample->callchain;
 
-	if (has_branch_callstack(evsel))
+	if (perf_evsel__has_branch_callstack(evsel))
 		callchain__lbr_callstack_printf(sample);
 
 	printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);
@@ -1081,7 +1081,7 @@ static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
 	if (sample_type & PERF_SAMPLE_CALLCHAIN)
 		callchain__printf(evsel, sample);
 
-	if ((sample_type & PERF_SAMPLE_BRANCH_STACK) && !has_branch_callstack(evsel))
+	if ((sample_type & PERF_SAMPLE_BRANCH_STACK) && !perf_evsel__has_branch_callstack(evsel))
 		branch_stack__printf(sample);
 
 	if (sample_type & PERF_SAMPLE_REGS_USER)
......
@@ -1208,8 +1208,8 @@ static int kallsyms__delta(struct map *map, const char *filename, u64 *delta)
 	return 0;
 }
 
-int dso__load_kallsyms(struct dso *dso, const char *filename,
-		       struct map *map, symbol_filter_t filter)
+int __dso__load_kallsyms(struct dso *dso, const char *filename,
+			 struct map *map, bool no_kcore, symbol_filter_t filter)
 {
 	u64 delta = 0;
@@ -1230,12 +1230,18 @@ int dso__load_kallsyms(struct dso *dso, const char *filename,
 	else
 		dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;
 
-	if (!dso__load_kcore(dso, map, filename))
+	if (!no_kcore && !dso__load_kcore(dso, map, filename))
 		return dso__split_kallsyms_for_kcore(dso, map, filter);
 	else
 		return dso__split_kallsyms(dso, map, delta, filter);
 }
 
+int dso__load_kallsyms(struct dso *dso, const char *filename,
+		       struct map *map, symbol_filter_t filter)
+{
+	return __dso__load_kallsyms(dso, filename, map, false, filter);
+}
+
 static int dso__load_perf_map(struct dso *dso, struct map *map,
 			      symbol_filter_t filter)
 {
......
@@ -240,6 +240,8 @@ int dso__load_vmlinux(struct dso *dso, struct map *map,
 		      symbol_filter_t filter);
 int dso__load_vmlinux_path(struct dso *dso, struct map *map,
 			   symbol_filter_t filter);
+int __dso__load_kallsyms(struct dso *dso, const char *filename, struct map *map,
+			 bool no_kcore, symbol_filter_t filter);
 int dso__load_kallsyms(struct dso *dso, const char *filename, struct map *map,
 		       symbol_filter_t filter);
......