Commit 32c261c0 authored by Jiri Olsa, committed by Arnaldo Carvalho de Melo

libperf: Adopt perf_mmap__mmap() function from tools/perf

Move perf_mmap__mmap() from tools/perf to libperf; it will be used in
the following patches. Rename the existing perf function to
mmap__mmap().
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lore.kernel.org/lkml/20191007125344.14268-5-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Parent bf59b305
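For context, a minimal sketch (not part of the patch) of how a libperf-internal caller might drive the newly exported perf_mmap__mmap(). The helper name map_event_fd, the already-opened event fd, and the four-data-page ring size are hypothetical; the prot/mask fields mirror the usage visible in the hunks below.

#include <sys/mman.h>
#include <internal/mmap.h>	/* struct perf_mmap, struct perf_mmap_param */
#include <internal/lib.h>	/* page_size */

/* Hypothetical helper: map the ring buffer of an already-opened event fd. */
static int map_event_fd(struct perf_mmap *map, int fd, int cpu)
{
	struct perf_mmap_param mp = {
		.prot = PROT_READ | PROT_WRITE,
		/* four data pages; the data area must be a power-of-two number of pages */
		.mask = 4 * page_size - 1,
	};

	perf_mmap__init(map, /*overwrite=*/false);

	/* Returns 0 on success, -1 if mmap() failed (map->base is reset to NULL). */
	return perf_mmap__mmap(map, &mp, fd, cpu);
}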
@@ -37,5 +37,7 @@ struct perf_mmap_param {
 
 size_t perf_mmap__mmap_len(struct perf_mmap *map);
 void perf_mmap__init(struct perf_mmap *map, bool overwrite);
+int perf_mmap__mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
+		    int fd, int cpu);
 
 #endif /* __LIBPERF_INTERNAL_MMAP_H */
 // SPDX-License-Identifier: GPL-2.0
+#include <sys/mman.h>
 #include <internal/mmap.h>
 #include <internal/lib.h>
@@ -13,3 +14,20 @@ size_t perf_mmap__mmap_len(struct perf_mmap *map)
 {
 	return map->mask + 1 + page_size;
 }
+
+int perf_mmap__mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
+		    int fd, int cpu)
+{
+	map->prev = 0;
+	map->mask = mp->mask;
+	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
+			 MAP_SHARED, fd, 0);
+	if (map->base == MAP_FAILED) {
+		map->base = NULL;
+		return -1;
+	}
+
+	map->fd = fd;
+	map->cpu = cpu;
+	return 0;
+}
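A quick numeric sanity check of the length arithmetic above: perf_mmap__mmap_len() returns mask + 1 + page_size, i.e. the data area plus one leading control page (struct perf_event_mmap_page). The 4096-byte page size and eight data pages below are assumptions for illustration only.

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;			/* assumed page size */
	unsigned long data_pages = 8;			/* assumed, must be a power of two */
	unsigned long mask = data_pages * page_size - 1;	/* 0x7fff */
	unsigned long len = mask + 1 + page_size;	/* what perf_mmap__mmap_len() computes */

	printf("mask=0x%lx, mmap length=%lu bytes (%lu pages)\n",
	       mask, len, len / page_size);		/* 36864 bytes, 9 pages */
	return 0;
}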
@@ -695,7 +695,7 @@ static int evlist__mmap_per_evsel(struct evlist *evlist, int idx,
 		if (*output == -1) {
 			*output = fd;
 
-			if (perf_mmap__mmap(&maps[idx], mp, *output, evlist_cpu) < 0)
+			if (mmap__mmap(&maps[idx], mp, *output, evlist_cpu) < 0)
 				return -1;
 		} else {
 			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
@@ -353,7 +353,7 @@ static void perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params
 		CPU_SET(map->core.cpu, &map->affinity_mask);
 }
 
-int perf_mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
+int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
 {
 	/*
 	 * The last one will be done at perf_mmap__consume(), so that we
@@ -369,18 +369,12 @@ int perf_mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
 	 * perf_evlist__filter_pollfd().
 	 */
 	refcount_set(&map->core.refcnt, 2);
-	map->core.prev = 0;
-	map->core.mask = mp->core.mask;
-	map->core.base = mmap(NULL, mmap__mmap_len(map), mp->core.prot,
-			 MAP_SHARED, fd, 0);
-	if (map->core.base == MAP_FAILED) {
+
+	if (perf_mmap__mmap(&map->core, &mp->core, fd, cpu)) {
 		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
 			  errno);
-		map->core.base = NULL;
 		return -1;
 	}
-	map->core.fd = fd;
-	map->core.cpu = cpu;
 
 	perf_mmap__setup_affinity_mask(map, mp);
@@ -42,7 +42,7 @@ struct mmap_params {
 	struct auxtrace_mmap_params auxtrace_mp;
 };
 
-int perf_mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu);
+int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu);
 void perf_mmap__munmap(struct mmap *map);
 void perf_mmap__get(struct mmap *map);