Commit 38b200d6 authored by Peter Zijlstra, committed by Ingo Molnar

perf_counter: Add PERF_EVENT_READ

Provide a read()-like event which can be used to log the
counter value at specific sites such as child->parent
folding on exit.

In order to be useful, we log the counter parent ID, not the
actual counter ID, since userspace can only relate parent
IDs to perf_counter_attr constructs.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent 194002b2
@@ -334,6 +334,18 @@ enum perf_event_type {
	 */
	PERF_EVENT_FORK			= 7,

+	/*
+	 * struct {
+	 *	struct perf_event_header	header;
+	 *	u32				pid, tid;
+	 *	u64				value;
+	 *	{ u64		time_enabled;	} && PERF_FORMAT_ENABLED
+	 *	{ u64		time_running;	} && PERF_FORMAT_RUNNING
+	 *	{ u64		parent_id;	} && PERF_FORMAT_ID
+	 * };
+	 */
+	PERF_EVENT_READ			= 8,
+
	/*
	 * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field
	 * will be PERF_SAMPLE_*
......
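For reference, a userspace consumer walking the mmap ring buffer could decode this variable-size record along the following lines. This is a minimal sketch rather than part of the patch; the decode_read_event() helper and its anonymous body struct are hypothetical, and which trailer words are present depends on the read_format bits the counter was opened with:

#include <stdio.h>
#include <linux/types.h>
#include <linux/perf_counter.h>	/* struct perf_event_header, PERF_FORMAT_* */

/* Hypothetical decoder: 'hdr' points at a PERF_EVENT_READ record in the
 * mmap ring buffer; 'read_format' is the attr.read_format the counter
 * was opened with, which selects the optional trailer words. */
static void decode_read_event(struct perf_event_header *hdr, __u64 read_format)
{
	struct {
		__u32 pid, tid;
		__u64 value;
	} *body = (void *)(hdr + 1);		/* fixed part follows the header */
	__u64 *opt = (__u64 *)(body + 1);	/* optional trailer follows the fixed part */

	printf("READ pid=%u tid=%u value=%llu\n",
	       body->pid, body->tid, (unsigned long long)body->value);

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("  time_enabled=%llu\n", (unsigned long long)*opt++);
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("  time_running=%llu\n", (unsigned long long)*opt++);
	if (read_format & PERF_FORMAT_ID)
		printf("  parent_id=%llu\n", (unsigned long long)*opt++);
}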
@@ -2623,6 +2623,66 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 	perf_output_end(&handle);
 }
 
+/*
+ * read event
+ */
+
+struct perf_read_event {
+	struct perf_event_header	header;
+
+	u32				pid;
+	u32				tid;
+	u64				value;
+	u64				format[3];
+};
+
+static void
+perf_counter_read_event(struct perf_counter *counter,
+			struct task_struct *task)
+{
+	struct perf_output_handle handle;
+	struct perf_read_event event = {
+		.header = {
+			.type = PERF_EVENT_READ,
+			.misc = 0,
+			.size = sizeof(event) - sizeof(event.format),
+		},
+		.pid = perf_counter_pid(counter, task),
+		.tid = perf_counter_tid(counter, task),
+		.value = atomic64_read(&counter->count),
+	};
+	int ret, i = 0;
+
+	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+		event.header.size += sizeof(u64);
+		event.format[i++] = counter->total_time_enabled;
+	}
+
+	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+		event.header.size += sizeof(u64);
+		event.format[i++] = counter->total_time_running;
+	}
+
+	if (counter->attr.read_format & PERF_FORMAT_ID) {
+		u64 id;
+
+		event.header.size += sizeof(u64);
+		if (counter->parent)
+			id = counter->parent->id;
+		else
+			id = counter->id;
+
+		event.format[i++] = id;
+	}
+
+	ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
+	if (ret)
+		return;
+
+	perf_output_copy(&handle, &event, event.header.size);
+	perf_output_end(&handle);
+}
+
 /*
  * fork tracking
  */
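To make the size computation in perf_counter_read_event() concrete: the fixed part of the record is sizeof(struct perf_event_header) (8 bytes) plus pid and tid (2 x 4 bytes) plus value (8 bytes), i.e. 24 bytes, and each read_format bit that is set appends one u64, so a counter opened with all three bits produces a 48-byte record. Because header.size starts from sizeof(event) minus the full format[3] array and only grows per set bit, unused trailer slots are never copied into the buffer.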
@@ -3985,10 +4045,13 @@ static int inherit_group(struct perf_counter *parent_counter,
 }
 
 static void sync_child_counter(struct perf_counter *child_counter,
-			       struct perf_counter *parent_counter)
+			       struct task_struct *child)
 {
+	struct perf_counter *parent_counter = child_counter->parent;
 	u64 child_val;
 
+	perf_counter_read_event(child_counter, child);
+
 	child_val = atomic64_read(&child_counter->count);
 
 	/*
@@ -4017,7 +4080,8 @@ static void sync_child_counter(struct perf_counter *child_counter,
 
 static void
 __perf_counter_exit_task(struct perf_counter *child_counter,
-			 struct perf_counter_context *child_ctx)
+			 struct perf_counter_context *child_ctx,
+			 struct task_struct *child)
 {
 	struct perf_counter *parent_counter;
 
@@ -4031,7 +4095,7 @@ __perf_counter_exit_task(struct perf_counter *child_counter,
 	 * counters need to be zapped - but otherwise linger.
 	 */
 	if (parent_counter) {
-		sync_child_counter(child_counter, parent_counter);
+		sync_child_counter(child_counter, child);
 		free_counter(child_counter);
 	}
 }
@@ -4093,7 +4157,7 @@ void perf_counter_exit_task(struct task_struct *child)
 again:
 	list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
 				 list_entry)
-		__perf_counter_exit_task(child_counter, child_ctx);
+		__perf_counter_exit_task(child_counter, child_ctx, child);
 
 	/*
 	 * If the last counter was a group counter, it will have appended all
......
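The parent-ID logging described in the changelog is what makes these records usable on the tooling side: a profiler that opened the original counters can resolve each PERF_EVENT_READ back to the perf_counter_attr it configured, because inherited child counters report their parent's ID rather than their own. A minimal sketch of that lookup, assuming the tool recorded each counter's ID at open time via a read() with PERF_FORMAT_ID set (the opened_counter table and attr_for_id() helper are hypothetical):

#include <stddef.h>
#include <linux/types.h>
#include <linux/perf_counter.h>	/* struct perf_counter_attr */

/* Hypothetical bookkeeping: one entry per counter the tool opened,
 * with 'id' obtained from a read() that included PERF_FORMAT_ID. */
struct opened_counter {
	__u64			 id;
	struct perf_counter_attr attr;
};

/* Map the parent_id carried by a PERF_EVENT_READ record back to the
 * attr that configured it; child counters log their parent's ID, so
 * per-task inherited counters all resolve to the same entry. */
static struct perf_counter_attr *
attr_for_id(struct opened_counter *table, int nr, __u64 parent_id)
{
	int i;

	for (i = 0; i < nr; i++)
		if (table[i].id == parent_id)
			return &table[i].attr;

	return NULL;	/* not a counter we opened */
}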