Commit 18ef15c6 authored by Arnaldo Carvalho de Melo

perf tools: Experiment with cppcheck

Experimenting a bit using cppcheck[1], a static checker brought to my
attention by Colin, reducing the scope of some variables, reducing the
number of source code lines in the process:

  $ cppcheck --enable=style tools/perf/util/thread.c
  Checking tools/perf/util/thread.c...
  [tools/perf/util/thread.c:17]: (style) The scope of the variable 'leader' can be reduced.
  [tools/perf/util/thread.c:133]: (style) The scope of the variable 'err' can be reduced.
  [tools/perf/util/thread.c:273]: (style) The scope of the variable 'err' can be reduced.
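
To make the warnings concrete, this is the before/after shape of the
change they suggest (frob_before()/frob_after()/do_one() are made-up
names used purely for illustration, not code from this patch):

  int do_one(int i);  /* stand-in for any call that can fail */

  /* Before: 'err' is declared at function scope but only used in the loop. */
  int frob_before(int n)
  {
          int i, err;

          for (i = 0; i < n; i++) {
                  err = do_one(i);
                  if (err)
                          return err;
          }
          return 0;
  }

  /* After: the declaration moves into the smallest block that needs it,
   * which is what the hunks below do for 'err', 'n', 'leader', etc.
   */
  int frob_after(int n)
  {
          int i;

          for (i = 0; i < n; i++) {
                  int err = do_one(i);
                  if (err)
                          return err;
          }
          return 0;
  }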

Will continue later, but these are already useful, keep them.
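
A follow-up pass could probably cover the rest of tools/perf/util/ in one
go, since cppcheck also accepts a directory argument and a -j <jobs> flag,
e.g.:

  $ cppcheck --enable=style -j4 tools/perf/util/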

1: https://sourceforge.net/p/cppcheck/wiki/Home/

Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Colin Ian King <colin.king@canonical.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Wang Nan <wangnan0@huawei.com>
Link: http://lkml.kernel.org/n/tip-ixws7lbycihhpmq9cc949ti6@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Parent ead1a574

diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -384,15 +384,14 @@ void perf_evlist__toggle_enable(struct perf_evlist *evlist)
 static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist,
                                          struct perf_evsel *evsel, int cpu)
 {
-        int thread, err;
+        int thread;
         int nr_threads = perf_evlist__nr_threads(evlist, evsel);
 
         if (!evsel->fd)
                 return -EINVAL;
 
         for (thread = 0; thread < nr_threads; thread++) {
-                err = ioctl(FD(evsel, cpu, thread),
-                            PERF_EVENT_IOC_ENABLE, 0);
+                int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
                 if (err)
                         return err;
         }
@@ -403,14 +402,14 @@ static int perf_evlist__enable_event_thread(struct perf_evlist *evlist,
                                             struct perf_evsel *evsel,
                                             int thread)
 {
-        int cpu, err;
+        int cpu;
         int nr_cpus = cpu_map__nr(evlist->cpus);
 
         if (!evsel->fd)
                 return -EINVAL;
 
         for (cpu = 0; cpu < nr_cpus; cpu++) {
-                err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
+                int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
                 if (err)
                         return err;
         }
@@ -1606,10 +1605,9 @@ void perf_evlist__close(struct perf_evlist *evlist)
         struct perf_evsel *evsel;
         int ncpus = cpu_map__nr(evlist->cpus);
         int nthreads = thread_map__nr(evlist->threads);
-        int n;
 
         evlist__for_each_entry_reverse(evlist, evsel) {
-                n = evsel->cpus ? evsel->cpus->nr : ncpus;
+                int n = evsel->cpus ? evsel->cpus->nr : ncpus;
                 perf_evsel__close(evsel, n, nthreads);
         }
 }

diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -985,14 +985,13 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
 
 static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
 {
-        int cpu, thread;
-
         if (evsel->system_wide)
                 nthreads = 1;
 
         evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
 
         if (evsel->fd) {
+                int cpu, thread;
                 for (cpu = 0; cpu < ncpus; cpu++) {
                         for (thread = 0; thread < nthreads; thread++) {
                                 FD(evsel, cpu, thread) = -1;

diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -1745,9 +1745,8 @@ static int resolve_lbr_callchain_sample(struct thread *thread,
                                         int max_stack)
 {
         struct ip_callchain *chain = sample->callchain;
-        int chain_nr = min(max_stack, (int)chain->nr);
+        int chain_nr = min(max_stack, (int)chain->nr), i;
         u8 cpumode = PERF_RECORD_MISC_USER;
-        int i, j, err;
         u64 ip;
 
         for (i = 0; i < chain_nr; i++) {
@@ -1758,7 +1757,7 @@ static int resolve_lbr_callchain_sample(struct thread *thread,
         /* LBR only affects the user callchain */
         if (i != chain_nr) {
                 struct branch_stack *lbr_stack = sample->branch_stack;
-                int lbr_nr = lbr_stack->nr;
+                int lbr_nr = lbr_stack->nr, j;
                 /*
                  * LBR callstack can only get user call chain.
                  * The mix_chain_nr is kernel call chain
@@ -1772,6 +1771,7 @@ static int resolve_lbr_callchain_sample(struct thread *thread,
                 int mix_chain_nr = i + 1 + lbr_nr + 1;
 
                 for (j = 0; j < mix_chain_nr; j++) {
+                        int err;
                         if (callchain_param.order == ORDER_CALLEE) {
                                 if (j < i + 1)
                                         ip = chain->ips[j];

diff --git a/tools/perf/util/strbuf.h b/tools/perf/util/strbuf.h
--- a/tools/perf/util/strbuf.h
+++ b/tools/perf/util/strbuf.h
@@ -66,9 +66,8 @@ static inline ssize_t strbuf_avail(const struct strbuf *sb) {
 int strbuf_grow(struct strbuf *buf, size_t);
 
 static inline int strbuf_setlen(struct strbuf *sb, size_t len) {
-        int ret;
         if (!sb->alloc) {
-                ret = strbuf_grow(sb, 0);
+                int ret = strbuf_grow(sb, 0);
                 if (ret)
                         return ret;
         }

diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -14,13 +14,12 @@
 
 int thread__init_map_groups(struct thread *thread, struct machine *machine)
 {
-        struct thread *leader;
         pid_t pid = thread->pid_;
 
         if (pid == thread->tid || pid == -1) {
                 thread->mg = map_groups__new(machine);
         } else {
-                leader = __machine__findnew_thread(machine, pid, pid);
+                struct thread *leader = __machine__findnew_thread(machine, pid, pid);
                 if (leader) {
                         thread->mg = map_groups__get(leader->mg);
                         thread__put(leader);
@@ -130,11 +129,10 @@ int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp,
                        bool exec)
 {
         struct comm *new, *curr = thread__comm(thread);
-        int err;
 
         /* Override the default :tid entry */
         if (!thread->comm_set) {
-                err = comm__override(curr, str, timestamp, exec);
+                int err = comm__override(curr, str, timestamp, exec);
                 if (err)
                         return err;
         } else {
@@ -270,10 +268,9 @@ static int thread__clone_map_groups(struct thread *thread,
 
 int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp)
 {
-        int err;
-
         if (parent->comm_set) {
                 const char *comm = thread__comm_str(parent);
+                int err;
                 if (!comm)
                         return -ENOMEM;
                 err = thread__set_comm(thread, comm, timestamp);