Commit 078c3386 authored by Wang Nan, committed by Arnaldo Carvalho de Melo

perf evlist: Map backward events to backward_mmap

In perf_evlist__mmap_per_evsel(), select backward_mmap for backward
events and use the new perf_mmap APIs. backward_mmap is allocated
dynamically, on first use.

Remove the perf_evlist wrapper functions this makes unnecessary.
Signed-off-by: Wang Nan <wangnan0@huawei.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Cc: He Kuang <hekuang@huawei.com>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Nilay Vaish <nilayvaish@gmail.com>
Cc: Zefan Li <lizefan@huawei.com>
Cc: pi3orama@163.com
Link: http://lkml.kernel.org/r/1468485287-33422-9-git-send-email-wangnan0@huawei.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Parent b2cb615d
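With this change, backward (write_backward) events get their own ring buffers in evlist->backward_mmap and are consumed directly through the perf_mmap API. A minimal sketch of the resulting read loop, modeled on the updated test in the first hunk below (process_event() is a hypothetical consumer callback; the perf tool's internal types are assumed):

#include "util/evlist.h"	/* struct perf_evlist, struct perf_mmap (perf tool internal) */

/* Sketch: drain every backward ring buffer of an mmapped evlist.
 * Assumes evlist->backward_mmap was populated by perf_evlist__mmap()
 * as introduced in this patch; process_event() is a hypothetical
 * consumer callback.
 */
static void drain_backward(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		struct perf_mmap *map = &evlist->backward_mmap[i];
		union perf_event *event;

		/* Move the read pointer up to the kernel's write head. */
		perf_mmap__read_catchup(map);

		/* Then iterate events from newest to oldest. */
		while ((event = perf_mmap__read_backward(map)) != NULL)
			process_event(event);
	}
}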
--- a/tools/perf/tests/backward-ring-buffer.c
+++ b/tools/perf/tests/backward-ring-buffer.c
@@ -31,8 +31,8 @@ static int count_samples(struct perf_evlist *evlist, int *sample_count,
 	for (i = 0; i < evlist->nr_mmaps; i++) {
 		union perf_event *event;
 
-		perf_evlist__mmap_read_catchup(evlist, i);
-		while ((event = perf_evlist__mmap_read_backward(evlist, i)) != NULL) {
+		perf_mmap__read_catchup(&evlist->backward_mmap[i]);
+		while ((event = perf_mmap__read_backward(&evlist->backward_mmap[i])) != NULL) {
 			const u32 type = event->header.type;
 
 			switch (type) {
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -27,7 +27,6 @@
 #include <linux/log2.h>
 #include <linux/err.h>
 
-static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx);
 static void perf_mmap__munmap(struct perf_mmap *map);
 static void perf_mmap__put(struct perf_mmap *map);
@@ -692,8 +691,11 @@ static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value)
 {
 	int i;
 
+	if (!evlist->backward_mmap)
+		return 0;
+
 	for (i = 0; i < evlist->nr_mmaps; i++) {
-		int fd = evlist->mmap[i].fd;
+		int fd = evlist->backward_mmap[i].fd;
 		int err;
 
 		if (fd < 0)
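Pausing only ever applies to the overwritable backward buffers, which is why the loop now walks backward_mmap and returns early when none were allocated. The elided loop body issues the pause ioctl on each ring-buffer fd; roughly (a sketch, not the verbatim body):

#include <stdbool.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>

/* Sketch: PERF_EVENT_IOC_PAUSE_OUTPUT (Linux >= 4.7) stops the kernel
 * from overwriting a ring buffer, so user space can read a backward
 * buffer without racing the writer; passing 0 resumes output.
 */
static int pause_ring_buffer(int fd, bool value)
{
	return ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
}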
@@ -904,16 +906,6 @@ static void perf_mmap__put(struct perf_mmap *md)
 		perf_mmap__munmap(md);
 }
 
-static void perf_evlist__mmap_get(struct perf_evlist *evlist, int idx)
-{
-	perf_mmap__get(&evlist->mmap[idx]);
-}
-
-static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx)
-{
-	perf_mmap__put(&evlist->mmap[idx]);
-}
-
 void perf_mmap__consume(struct perf_mmap *md, bool overwrite)
 {
 	if (!overwrite) {
@@ -1049,12 +1041,6 @@ static int perf_mmap__mmap(struct perf_mmap *map,
 	return 0;
 }
 
-static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
-			       struct mmap_params *mp, int fd)
-{
-	return perf_mmap__mmap(&evlist->mmap[idx], mp, fd);
-}
-
 static bool
 perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
 			 struct perf_evsel *evsel)
@@ -1066,16 +1052,27 @@ perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
 static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
 				       struct mmap_params *mp, int cpu,
-				       int thread, int *output)
+				       int thread, int *_output, int *_output_backward)
 {
 	struct perf_evsel *evsel;
 	int revent;
 
 	evlist__for_each_entry(evlist, evsel) {
+		struct perf_mmap *maps = evlist->mmap;
+		int *output = _output;
 		int fd;
 
-		if (!!evsel->attr.write_backward != (evlist->overwrite && evlist->backward))
-			continue;
+		if (evsel->attr.write_backward) {
+			output = _output_backward;
+			maps = evlist->backward_mmap;
+
+			if (!maps) {
+				maps = perf_evlist__alloc_mmap(evlist);
+				if (!maps)
+					return -1;
+				evlist->backward_mmap = maps;
+			}
+		}
 
 		if (evsel->system_wide && thread)
 			continue;
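perf_evlist__alloc_mmap() is called above without being shown; the point is that backward_mmap is created lazily, only when the first write_backward event is mapped, so evlists without backward events pay nothing. A sketch of what such an allocator provides (assumed shape, not the verbatim function; struct perf_mmap comes from the perf tool's internal headers):

#include <stdlib.h>
#include "util/evlist.h"	/* struct perf_mmap (perf tool internal) */

/* Sketch: one zeroed perf_mmap per ring-buffer slot, each fd marked
 * unused so loops like perf_evlist__set_paused() can skip it.
 */
static struct perf_mmap *alloc_mmap_array(int nr_mmaps)
{
	struct perf_mmap *map = calloc(nr_mmaps, sizeof(*map));
	int i;

	if (!map)
		return NULL;
	for (i = 0; i < nr_mmaps; i++)
		map[i].fd = -1;	/* no event redirected here yet */
	return map;
}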
@@ -1084,13 +1081,14 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
 		if (*output == -1) {
 			*output = fd;
-			if (__perf_evlist__mmap(evlist, idx, mp, *output) < 0)
+
+			if (perf_mmap__mmap(&maps[idx], mp, *output) < 0)
 				return -1;
 		} else {
 			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
 				return -1;
 
-			perf_evlist__mmap_get(evlist, idx);
+			perf_mmap__get(&maps[idx]);
 		}
 
 		revent = perf_evlist__should_poll(evlist, evsel) ? POLLIN : 0;
@@ -1103,8 +1101,8 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
 		 * Therefore don't add it for polling.
 		 */
 		if (!evsel->system_wide &&
-		    __perf_evlist__add_pollfd(evlist, fd, &evlist->mmap[idx], revent) < 0) {
-			perf_evlist__mmap_put(evlist, idx);
+		    __perf_evlist__add_pollfd(evlist, fd, &maps[idx], revent) < 0) {
+			perf_mmap__put(&maps[idx]);
 			return -1;
 		}
@@ -1130,13 +1128,14 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
 	pr_debug2("perf event ring buffer mmapped per cpu\n");
 	for (cpu = 0; cpu < nr_cpus; cpu++) {
 		int output = -1;
+		int output_backward = -1;
 
 		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
 					      true);
 
 		for (thread = 0; thread < nr_threads; thread++) {
 			if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
-							thread, &output))
+							thread, &output, &output_backward))
 				goto out_unmap;
 		}
 	}
@@ -1157,12 +1156,13 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
 	pr_debug2("perf event ring buffer mmapped per thread\n");
 	for (thread = 0; thread < nr_threads; thread++) {
 		int output = -1;
+		int output_backward = -1;
 
 		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
 					      false);
 
 		if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
-						&output))
+						&output, &output_backward))
 			goto out_unmap;
 	}
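Net effect: each CPU (and thread) now carries two independent output fds, so forward events share one set of ring buffers (evlist->mmap) and backward events another (evlist->backward_mmap), and the two never mix. A caller opts in by setting attr.write_backward before mmapping. An end-to-end sketch of the flow, modeled on the backward-ring-buffer test (declarations and error handling elided; perf_evlist__pause()/resume() are the tool's wrappers around the pause ioctl shown earlier; process_event() is a hypothetical callback):

evsel->attr.write_backward = 1;               /* route this event into backward_mmap */

perf_evlist__mmap(evlist, mmap_pages, false); /* backward_mmap is allocated lazily */

/* ... run the workload; the kernel fills the backward buffers ... */

perf_evlist__pause(evlist);                   /* freeze the buffers before reading */
for (i = 0; i < evlist->nr_mmaps; i++) {
	perf_mmap__read_catchup(&evlist->backward_mmap[i]);
	while ((event = perf_mmap__read_backward(&evlist->backward_mmap[i])))
		process_event(event);
}
perf_evlist__resume(evlist);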