Commit 8b5b584d authored by Ingo Molnar

Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/jolsa/perf into perf/core

Pull perf/core improvements and fixes from Jiri Olsa:

  * Handle the num array type in python properly (Sebastian Andrzej Siewior)

  * Fix wrong condition for allocation failure (Jiri Olsa)

  * Adjust callchain based on DWARF debug info on powerpc (Sukadev Bhattiprolu)

  * Fix a risk for doing free on uninitialized pointer in traceevent lib (Rickard Strandqvist); see the sketch below
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
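
The last item above (the traceevent fix, reflected in the "char *token = NULL;" hunks below) guards a shared cleanup path that could otherwise call free() on a pointer that was never assigned. A minimal sketch of that bug class and its fix, using hypothetical names rather than the library's own code:

#include <stdlib.h>
#include <string.h>

/*
 * Hypothetical stand-in for a parse helper (names are not from the
 * traceevent library). Initializing 'token' to NULL keeps the shared
 * error path safe: free(NULL) is defined to be a no-op, whereas freeing
 * an uninitialized pointer is undefined behaviour.
 */
static int parse_something(const char *input, char **out)
{
	char *token = NULL;	/* the fix: start from NULL */

	if (input == NULL)
		goto out_free;	/* error taken before 'token' is assigned */

	token = strdup(input);
	if (!token)
		goto out_free;

	*out = token;
	return 0;

out_free:
	free(token);		/* safe even if 'token' was never set */
	return -1;
}

int main(void)
{
	char *out = NULL;

	parse_something(NULL, &out);	/* exercises the early error path */
	return 0;
}
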
@@ -2395,7 +2395,7 @@ process_flags(struct event_format *event, struct print_arg *arg, char **tok)
 {
 	struct print_arg *field;
 	enum event_type type;
-	char *token;
+	char *token = NULL;

 	memset(arg, 0, sizeof(*arg));
 	arg->type = PRINT_FLAGS;
@@ -2448,7 +2448,7 @@ process_symbols(struct event_format *event, struct print_arg *arg, char **tok)
 {
 	struct print_arg *field;
 	enum event_type type;
-	char *token;
+	char *token = NULL;

 	memset(arg, 0, sizeof(*arg));
 	arg->type = PRINT_SYMBOL;
@@ -2487,7 +2487,7 @@ process_hex(struct event_format *event, struct print_arg *arg, char **tok)
 {
 	struct print_arg *field;
 	enum event_type type;
-	char *token;
+	char *token = NULL;

 	memset(arg, 0, sizeof(*arg));
 	arg->type = PRINT_HEX;
...
@@ -3,3 +3,4 @@ PERF_HAVE_DWARF_REGS := 1
 LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o
 endif
 LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/header.o
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/skip-callchain-idx.o
...
/*
 * Use DWARF Debug information to skip unnecessary callchain entries.
 *
 * Copyright (C) 2014 Sukadev Bhattiprolu, IBM Corporation.
 * Copyright (C) 2014 Ulrich Weigand, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <inttypes.h>
#include <dwarf.h>
#include <elfutils/libdwfl.h>

#include "util/thread.h"
#include "util/callchain.h"

/*
 * When saving the callchain on Power, the kernel conservatively saves
 * excess entries in the callchain. A few of these entries are needed
 * in some cases but not others. If the unnecessary entries are not
 * ignored, we end up with duplicate arcs in the call-graphs. Use
 * DWARF debug information to skip over any unnecessary callchain
 * entries.
 *
 * See function header for arch_adjust_callchain() below for more details.
 *
 * The libdwfl code in this file is based on code from elfutils
 * (libdwfl/argp-std.c, libdwfl/tests/addrcfi.c, etc).
 */
static char *debuginfo_path;

static const Dwfl_Callbacks offline_callbacks = {
	.debuginfo_path = &debuginfo_path,
	.find_debuginfo = dwfl_standard_find_debuginfo,
	.section_address = dwfl_offline_section_address,
};
/*
* Use the DWARF expression for the Call-frame-address and determine
* if return address is in LR and if a new frame was allocated.
*/
static int check_return_reg(int ra_regno, Dwarf_Frame *frame)
{
	Dwarf_Op ops_mem[2];
	Dwarf_Op dummy;
	Dwarf_Op *ops = &dummy;
	size_t nops;
	int result;

	result = dwarf_frame_register(frame, ra_regno, ops_mem, &ops, &nops);
	if (result < 0) {
		pr_debug("dwarf_frame_register() %s\n", dwarf_errmsg(-1));
		return -1;
	}

	/*
	 * Check if return address is on the stack.
	 */
	if (nops != 0 || ops != NULL)
		return 0;

	/*
	 * Return address is in LR. Check if a frame was allocated
	 * but not-yet used.
	 */
	result = dwarf_frame_cfa(frame, &ops, &nops);
	if (result < 0) {
		pr_debug("dwarf_frame_cfa() returns %d, %s\n", result,
					dwarf_errmsg(-1));
		return -1;
	}

	/*
	 * If call frame address is in r1, no new frame was allocated.
	 */
	if (nops == 1 && ops[0].atom == DW_OP_bregx && ops[0].number == 1 &&
				ops[0].number2 == 0)
		return 1;

	/*
	 * A new frame was allocated but has not yet been used.
	 */
	return 2;
}
/*
* Get the DWARF frame from the .eh_frame section.
*/
static Dwarf_Frame *get_eh_frame(Dwfl_Module *mod, Dwarf_Addr pc)
{
	int result;
	Dwarf_Addr bias;
	Dwarf_CFI *cfi;
	Dwarf_Frame *frame;

	cfi = dwfl_module_eh_cfi(mod, &bias);
	if (!cfi) {
		pr_debug("%s(): no CFI - %s\n", __func__, dwfl_errmsg(-1));
		return NULL;
	}

	result = dwarf_cfi_addrframe(cfi, pc, &frame);
	if (result) {
		pr_debug("%s(): %s\n", __func__, dwfl_errmsg(-1));
		return NULL;
	}

	return frame;
}
/*
* Get the DWARF frame from the .debug_frame section.
*/
static Dwarf_Frame *get_dwarf_frame(Dwfl_Module *mod, Dwarf_Addr pc)
{
	Dwarf_CFI *cfi;
	Dwarf_Addr bias;
	Dwarf_Frame *frame;
	int result;

	cfi = dwfl_module_dwarf_cfi(mod, &bias);
	if (!cfi) {
		pr_debug("%s(): no CFI - %s\n", __func__, dwfl_errmsg(-1));
		return NULL;
	}

	result = dwarf_cfi_addrframe(cfi, pc, &frame);
	if (result) {
		pr_debug("%s(): %s\n", __func__, dwfl_errmsg(-1));
		return NULL;
	}

	return frame;
}
/*
* Return:
* 0 if return address for the program counter @pc is on stack
* 1 if return address is in LR and no new stack frame was allocated
* 2 if return address is in LR and a new frame was allocated (but not
* yet used)
* -1 in case of errors
*/
static int check_return_addr(const char *exec_file, Dwarf_Addr pc)
{
	int rc = -1;
	Dwfl *dwfl;
	Dwfl_Module *mod;
	Dwarf_Frame *frame;
	int ra_regno;
	Dwarf_Addr start = pc;
	Dwarf_Addr end = pc;
	bool signalp;

	dwfl = dwfl_begin(&offline_callbacks);
	if (!dwfl) {
		pr_debug("dwfl_begin() failed: %s\n", dwarf_errmsg(-1));
		return -1;
	}

	if (dwfl_report_offline(dwfl, "", exec_file, -1) == NULL) {
		pr_debug("dwfl_report_offline() failed %s\n", dwarf_errmsg(-1));
		goto out;
	}

	mod = dwfl_addrmodule(dwfl, pc);
	if (!mod) {
		pr_debug("dwfl_addrmodule() failed, %s\n", dwarf_errmsg(-1));
		goto out;
	}

	/*
	 * To work with split debug info files (eg: glibc), check both
	 * .eh_frame and .debug_frame sections of the ELF header.
	 */
	frame = get_eh_frame(mod, pc);
	if (!frame) {
		frame = get_dwarf_frame(mod, pc);
		if (!frame)
			goto out;
	}

	ra_regno = dwarf_frame_info(frame, &start, &end, &signalp);
	if (ra_regno < 0) {
		pr_debug("Return address register unavailable: %s\n",
				dwarf_errmsg(-1));
		goto out;
	}

	rc = check_return_reg(ra_regno, frame);

out:
	dwfl_end(dwfl);
	return rc;
}
/*
* The callchain saved by the kernel always includes the link register (LR).
*
* 0: PERF_CONTEXT_USER
* 1: Program counter (Next instruction pointer)
* 2: LR value
* 3: Caller's caller
* 4: ...
*
* The value in LR is only needed when it holds a return address. If the
* return address is on the stack, we should ignore the LR value.
*
* Further, when the return address is in the LR, if a new frame was just
* allocated but the LR was not saved into it, then the LR contains the
* caller, slot 4: contains the caller's caller and the contents of slot 3:
* (chain->ips[3]) is undefined and must be ignored.
*
* Use DWARF debug information to determine if any entries need to be skipped.
*
* Return:
* index: of callchain entry that needs to be ignored (if any)
* -1 if no entry needs to be ignored or in case of errors
*/
int arch_skip_callchain_idx(struct machine *machine, struct thread *thread,
				struct ip_callchain *chain)
{
	struct addr_location al;
	struct dso *dso = NULL;
	int rc;
	u64 ip;
	u64 skip_slot = -1;

	if (chain->nr < 3)
		return skip_slot;

	ip = chain->ips[2];
	thread__find_addr_location(thread, machine, PERF_RECORD_MISC_USER,
			MAP__FUNCTION, ip, &al);

	if (al.map)
		dso = al.map->dso;

	if (!dso) {
		pr_debug("%" PRIx64 " dso is NULL\n", ip);
		return skip_slot;
	}

	rc = check_return_addr(dso->long_name, ip);

	pr_debug("DSO %s, nr %" PRIx64 ", ip 0x%" PRIx64 "rc %d\n",
				dso->long_name, chain->nr, ip, rc);

	if (rc == 0) {
		/*
		 * Return address on stack. Ignore LR value in callchain
		 */
		skip_slot = 2;
	} else if (rc == 2) {
		/*
		 * New frame allocated but return address still in LR.
		 * Ignore the caller's caller entry in callchain.
		 */
		skip_slot = 3;
	}
	return skip_slot;
}
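
A minimal sketch (hypothetical sample values and helper names, not perf code) of how the skip index computed above maps onto the callchain layout described in the header comment: slot 2 (the LR value) is dropped when the return address is already on the stack, and slot 3 (the stale caller's-caller slot) is dropped when a new frame was allocated but not yet used.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in for struct ip_callchain. */
struct chain_example {
	uint64_t nr;
	uint64_t ips[8];
};

/* Print the chain, skipping the one slot flagged as redundant (-1 = none). */
static void print_chain(const struct chain_example *chain, int skip_slot)
{
	for (uint64_t i = 0; i < chain->nr; i++) {
		if ((int)i == skip_slot)
			continue;
		printf("  [%llu] 0x%llx\n",
		       (unsigned long long)i,
		       (unsigned long long)chain->ips[i]);
	}
}

int main(void)
{
	/* slot 0: context marker, 1: PC, 2: LR, 3: caller's caller */
	struct chain_example chain = {
		.nr  = 4,
		.ips = { 0xfffffffffffffe00ULL, 0x10001a00, 0x10002b00, 0x10003c00 },
	};

	printf("return address on stack -> skip the LR slot:\n");
	print_chain(&chain, 2);

	printf("new frame allocated, LR still live -> skip stale slot 3:\n");
	print_chain(&chain, 3);

	return 0;
}
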
@@ -184,7 +184,7 @@ static void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
 static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
 {
 	evsel->priv = zalloc(sizeof(struct perf_stat));
-	if (evsel == NULL)
+	if (evsel->priv == NULL)
 		return -ENOMEM;
 	perf_evsel__reset_stat_priv(evsel);
 	return 0;
...
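
The one-character condition fix above is an instance of a common slip after allocating into a member: the NULL check must test the freshly allocated member, not the containing object. A minimal sketch with hypothetical names, using plain calloc() in place of perf's zalloc():

#include <stdlib.h>

struct evsel_like {		/* hypothetical stand-in for perf_evsel */
	void *priv;
};

static int alloc_priv(struct evsel_like *evsel)
{
	evsel->priv = calloc(1, 64);	/* zalloc()-style zeroed allocation */
	if (evsel->priv == NULL)	/* checking 'evsel' here would never catch failure */
		return -1;
	return 0;
}

int main(void)
{
	struct evsel_like e = { 0 };
	int rc = alloc_priv(&e);

	free(e.priv);
	return rc ? 1 : 0;
}
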
@@ -48,6 +48,10 @@ ifneq ($(ARCH),$(filter $(ARCH),x86 arm))
   NO_LIBDW_DWARF_UNWIND := 1
 endif

+ifeq ($(ARCH),powerpc)
+  CFLAGS += -DHAVE_SKIP_CALLCHAIN_IDX
+endif
+
 ifeq ($(LIBUNWIND_LIBS),)
   NO_LIBUNWIND := 1
 else
...
@@ -176,4 +176,17 @@ static inline void callchain_cursor_snapshot(struct callchain_cursor *dest,
 	dest->first = src->curr;
 	dest->nr -= src->pos;
 }
+
+#ifdef HAVE_SKIP_CALLCHAIN_IDX
+extern int arch_skip_callchain_idx(struct machine *machine,
+			struct thread *thread, struct ip_callchain *chain);
+#else
+static inline int arch_skip_callchain_idx(struct machine *machine __maybe_unused,
+			struct thread *thread __maybe_unused,
+			struct ip_callchain *chain __maybe_unused)
+{
+	return -1;
+}
+#endif
+
 #endif /* __PERF_CALLCHAIN_H */
...
@@ -1281,7 +1281,9 @@ static int machine__resolve_callchain_sample(struct machine *machine,
 	u8 cpumode = PERF_RECORD_MISC_USER;
 	int chain_nr = min(max_stack, (int)chain->nr);
 	int i;
+	int j;
 	int err;
+	int skip_idx __maybe_unused;

 	callchain_cursor_reset(&callchain_cursor);

@@ -1290,14 +1292,26 @@ static int machine__resolve_callchain_sample(struct machine *machine,
 		return 0;
 	}

+	/*
+	 * Based on DWARF debug information, some architectures skip
+	 * a callchain entry saved by the kernel.
+	 */
+	skip_idx = arch_skip_callchain_idx(machine, thread, chain);
+
 	for (i = 0; i < chain_nr; i++) {
 		u64 ip;
 		struct addr_location al;

 		if (callchain_param.order == ORDER_CALLEE)
-			ip = chain->ips[i];
+			j = i;
 		else
-			ip = chain->ips[chain->nr - i - 1];
+			j = chain->nr - i - 1;
+
+#ifdef HAVE_SKIP_CALLCHAIN_IDX
+		if (j == skip_idx)
+			continue;
+#endif
+		ip = chain->ips[j];

 		if (ip >= PERF_CONTEXT_MAX) {
 			switch (ip) {
...
@@ -231,6 +231,47 @@ static inline struct event_format *find_cache_event(struct perf_evsel *evsel)
 	return event;
 }

+static PyObject *get_field_numeric_entry(struct event_format *event,
+		struct format_field *field, void *data)
+{
+	bool is_array = field->flags & FIELD_IS_ARRAY;
+	PyObject *obj, *list = NULL;
+	unsigned long long val;
+	unsigned int item_size, n_items, i;
+
+	if (is_array) {
+		list = PyList_New(field->arraylen);
+		item_size = field->size / field->arraylen;
+		n_items = field->arraylen;
+	} else {
+		item_size = field->size;
+		n_items = 1;
+	}
+
+	for (i = 0; i < n_items; i++) {
+		val = read_size(event, data + field->offset + i * item_size,
+				item_size);
+
+		if (field->flags & FIELD_IS_SIGNED) {
+			if ((long long)val >= LONG_MIN &&
+			    (long long)val <= LONG_MAX)
+				obj = PyInt_FromLong(val);
+			else
+				obj = PyLong_FromLongLong(val);
+		} else {
+			if (val <= LONG_MAX)
+				obj = PyInt_FromLong(val);
+			else
+				obj = PyLong_FromUnsignedLongLong(val);
+		}
+		if (is_array)
+			PyList_SET_ITEM(list, i, obj);
+	}
+	if (is_array)
+		obj = list;
+	return obj;
+}
+
 static void python_process_tracepoint(struct perf_sample *sample,
 				      struct perf_evsel *evsel,
 				      struct thread *thread,
@@ -239,7 +280,6 @@ static void python_process_tracepoint(struct perf_sample *sample,
 	PyObject *handler, *retval, *context, *t, *obj, *dict = NULL;
 	static char handler_name[256];
 	struct format_field *field;
-	unsigned long long val;
 	unsigned long s, ns;
 	struct event_format *event;
 	unsigned n = 0;
@@ -303,20 +343,7 @@ static void python_process_tracepoint(struct perf_sample *sample,
 			offset = field->offset;
 			obj = PyString_FromString((char *)data + offset);
 		} else { /* FIELD_IS_NUMERIC */
-			val = read_size(event, data + field->offset,
-					field->size);
-			if (field->flags & FIELD_IS_SIGNED) {
-				if ((long long)val >= LONG_MIN &&
-				    (long long)val <= LONG_MAX)
-					obj = PyInt_FromLong(val);
-				else
-					obj = PyLong_FromLongLong(val);
-			} else {
-				if (val <= LONG_MAX)
-					obj = PyInt_FromLong(val);
-				else
-					obj = PyLong_FromUnsignedLongLong(val);
-			}
+			obj = get_field_numeric_entry(event, field, data);
 		}
 		if (handler)
 			PyTuple_SetItem(t, n++, obj);
...
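
The new get_field_numeric_entry() helper above treats an array field of 'size' bytes as 'arraylen' items of size/arraylen bytes each and reads them one by one. A minimal sketch of that slicing, with hypothetical raw data standing in for a real tracepoint record:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	/* pretend this is a FIELD_IS_ARRAY field: 4 little-endian u16 items */
	const unsigned char data[] = { 1, 0, 2, 0, 3, 0, 4, 0 };
	unsigned int size = sizeof(data);
	unsigned int arraylen = 4;
	unsigned int item_size = size / arraylen;	/* 2 bytes per item */

	for (unsigned int i = 0; i < arraylen; i++) {
		uint16_t val;

		/* analogous to read_size(event, data + offset + i * item_size, item_size) */
		memcpy(&val, data + i * item_size, item_size);
		printf("item %u = %u\n", i, val);
	}
	return 0;
}
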