Commit 6afad54d authored by Kan Liang, committed by Arnaldo Carvalho de Melo

perf mmap: Discard legacy interfaces for mmap read forward

Discards legacy interfaces perf_evlist__mmap_read_forward(),
perf_evlist__mmap_read() and perf_evlist__mmap_consume().

No tools use them.
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Wang Nan <wangnan0@huawei.com>
Link: http://lkml.kernel.org/r/1519945751-37786-14-git-send-email-kan.liang@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Parent 75948730
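
With these wrappers gone, tools are expected to read each ring buffer through the per-mmap helpers directly. The sketch below is illustrative and is not part of this commit's diff: it assumes the perf_mmap__read_init()/perf_mmap__read_event()/perf_mmap__read_done() helpers introduced earlier in this patch series, and the parameter lists shown are approximate for that series; only perf_mmap__consume(map, overwrite) is confirmed by the diff itself.

```c
#include "mmap.h"	/* tools/perf/util; include path is approximate */

/*
 * Illustrative only: a per-mmap read loop replacing the removed
 * perf_evlist__mmap_read()/perf_evlist__mmap_consume() wrappers.
 * Signatures of the read_init/read_event/read_done helpers are
 * assumed from this patch series and may differ in other trees.
 */
static void drain_one_mmap(struct perf_mmap *md, bool overwrite)
{
	union perf_event *event;
	u64 start, end;

	/* Snapshot the readable region; bail out if empty or unmapped. */
	if (perf_mmap__read_init(md, overwrite, &start, &end) < 0)
		return;

	while ((event = perf_mmap__read_event(md, overwrite, &start, end)) != NULL) {
		/* ... hand 'event' to the tool's handler here ... */
		perf_mmap__consume(md, overwrite);
	}

	perf_mmap__read_done(md);
}
```

A tool would typically run something like this loop over each evlist->mmap[i] after poll() reports activity, instead of going through the evlist-level wrappers removed below.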
@@ -702,29 +702,6 @@ static int perf_evlist__resume(struct perf_evlist *evlist)
return perf_evlist__set_paused(evlist, false);
}
union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int idx)
{
struct perf_mmap *md = &evlist->mmap[idx];
/*
* Check messup is required for forward overwritable ring buffer:
* memory pointed by md->prev can be overwritten in this case.
* No need for read-write ring buffer: kernel stop outputting when
* it hit md->prev (perf_mmap__consume()).
*/
return perf_mmap__read_forward(md);
}
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
return perf_evlist__mmap_read_forward(evlist, idx);
}
void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{
perf_mmap__consume(&evlist->mmap[idx], false);
}
static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
{
int i;
@@ -761,7 +738,7 @@ static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist)
map[i].fd = -1;
/*
* When the perf_mmap() call is made we grab one refcount, plus
* one extra to let perf_evlist__mmap_consume() get the last
* one extra to let perf_mmap__consume() get the last
* events after all real references (perf_mmap__get()) are
* dropped.
*
......
@@ -129,10 +129,6 @@ struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id);
void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist, enum bkw_mmap_state state);
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx);
union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist,
int idx);
void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx);
int perf_evlist__open(struct perf_evlist *evlist);
......
@@ -63,25 +63,6 @@ static union perf_event *perf_mmap__read(struct perf_mmap *map,
return event;
}
/*
* legacy interface for mmap read.
* Don't use it. Use perf_mmap__read_event().
*/
union perf_event *perf_mmap__read_forward(struct perf_mmap *map)
{
u64 head;
/*
* Check if event was unmapped due to a POLLHUP/POLLERR.
*/
if (!refcount_read(&map->refcnt))
return NULL;
head = perf_mmap__read_head(map);
return perf_mmap__read(map, &map->prev, head);
}
/*
* Read event from ring buffer one by one.
* Return one event for each call.
@@ -191,7 +172,7 @@ void perf_mmap__munmap(struct perf_mmap *map)
int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd)
{
/*
* The last one will be done at perf_evlist__mmap_consume(), so that we
* The last one will be done at perf_mmap__consume(), so that we
* make sure we don't prevent tools from consuming every last event in
* the ring buffer.
*
......
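
For reference, the extra reference described in the refcount comments above is released on the consume side. The sketch below is a paraphrase of that design, not text taken from this commit's diff: perf_mmap__empty() and perf_mmap__put() are assumed helpers in this tree, and the exact body may differ.

```c
/*
 * Paraphrase of the consume path the comments above refer to:
 * advance the tail so the kernel may reuse the space, and once
 * only the mmap-time reference remains and the ring is empty,
 * drop that last reference.
 */
void perf_mmap__consume(struct perf_mmap *map, bool overwrite)
{
	if (!overwrite) {
		u64 old = map->prev;

		/* publish the new tail position to the kernel */
		perf_mmap__write_tail(map, old);
	}

	/* refcnt == 1 means only the extra reference from perf_mmap__mmap() is left */
	if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
		perf_mmap__put(map);
}
```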