Commit a5cdd40c authored by Peter Zijlstra, committed by Ingo Molnar

perf: Update perf_event_type documentation

Due to a discussion with Adrian I had a good look at the perf_event_type record
layout and found the documentation to be somewhat unclear.

Cc: Adrian Hunter <adrian.hunter@intel.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20130716150907.GL23818@dyad.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent 17f41571
@@ -478,6 +478,16 @@ enum perf_event_type {
* file will be supported by older perf tools, with these new optional
* fields being ignored.
*
* struct sample_id {
* { u32 pid, tid; } && PERF_SAMPLE_TID
* { u64 time; } && PERF_SAMPLE_TIME
* { u64 id; } && PERF_SAMPLE_ID
* { u64 stream_id;} && PERF_SAMPLE_STREAM_ID
* { u32 cpu, res; } && PERF_SAMPLE_CPU
* } && perf_event_attr::sample_id_all
*/
/*
* The MMAP events record the PROT_EXEC mappings so that we can
* correlate userspace IPs to code. They have the following structure:
*
@@ -498,6 +508,7 @@ enum perf_event_type {
* struct perf_event_header header;
* u64 id;
* u64 lost;
* struct sample_id sample_id;
* };
*/
PERF_RECORD_LOST = 2,
@@ -508,6 +519,7 @@ enum perf_event_type {
*
* u32 pid, tid;
* char comm[];
* struct sample_id sample_id;
* };
*/
PERF_RECORD_COMM = 3,
@@ -518,6 +530,7 @@ enum perf_event_type {
* u32 pid, ppid;
* u32 tid, ptid;
* u64 time;
* struct sample_id sample_id;
* };
*/
PERF_RECORD_EXIT = 4,
@@ -528,6 +541,7 @@ enum perf_event_type {
* u64 time;
* u64 id;
* u64 stream_id;
* struct sample_id sample_id;
* };
*/
PERF_RECORD_THROTTLE = 5,
@@ -539,6 +553,7 @@ enum perf_event_type {
* u32 pid, ppid;
* u32 tid, ptid;
* u64 time;
* struct sample_id sample_id;
* };
*/
PERF_RECORD_FORK = 7,
@@ -549,6 +564,7 @@ enum perf_event_type {
* u32 pid, tid;
*
* struct read_format values;
* struct sample_id sample_id;
* };
*/
PERF_RECORD_READ = 8,
@@ -596,7 +612,7 @@ enum perf_event_type {
* u64 dyn_size; } && PERF_SAMPLE_STACK_USER
*
* { u64 weight; } && PERF_SAMPLE_WEIGHT
* { u64 data_src; } && PERF_SAMPLE_DATA_SRC
* { u64 data_src; } && PERF_SAMPLE_DATA_SRC
* };
*/
PERF_RECORD_SAMPLE = 9,
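The sample_id block documented above is only appended when the event was opened with perf_event_attr::sample_id_all set, and each field is present only if the corresponding PERF_SAMPLE_* bit is in attr.sample_type. Since the block sits at the very end of each non-SAMPLE record, a reader can recover it by walking backwards from header.size. A minimal consumer-side sketch (not part of this patch; parse_sample_id and sample_id_view are hypothetical names, not kernel or perf-tool API):

/*
 * Consumer-side sketch: read the sample_id block off the tail of a
 * non-SAMPLE record. Field order follows the struct sample_id
 * documentation above; the block is only present when the event was
 * opened with perf_event_attr::sample_id_all set.
 */
#include <stdint.h>
#include <string.h>
#include <linux/perf_event.h>

struct sample_id_view {
	uint32_t pid, tid;	/* PERF_SAMPLE_TID */
	uint64_t time;		/* PERF_SAMPLE_TIME */
	uint64_t id;		/* PERF_SAMPLE_ID */
	uint64_t stream_id;	/* PERF_SAMPLE_STREAM_ID */
	uint32_t cpu, res;	/* PERF_SAMPLE_CPU */
};

static void parse_sample_id(const struct perf_event_header *hdr,
			    uint64_t sample_type,
			    struct sample_id_view *out)
{
	/* The block ends the record, so walk backwards from header.size,
	 * consuming the documented fields in reverse order. */
	const char *p = (const char *)hdr + hdr->size;

	if (sample_type & PERF_SAMPLE_CPU) {
		p -= 2 * sizeof(uint32_t);
		memcpy(&out->cpu, p, sizeof(uint32_t));
		memcpy(&out->res, p + sizeof(uint32_t), sizeof(uint32_t));
	}
	if (sample_type & PERF_SAMPLE_STREAM_ID) {
		p -= sizeof(uint64_t);
		memcpy(&out->stream_id, p, sizeof(uint64_t));
	}
	if (sample_type & PERF_SAMPLE_ID) {
		p -= sizeof(uint64_t);
		memcpy(&out->id, p, sizeof(uint64_t));
	}
	if (sample_type & PERF_SAMPLE_TIME) {
		p -= sizeof(uint64_t);
		memcpy(&out->time, p, sizeof(uint64_t));
	}
	if (sample_type & PERF_SAMPLE_TID) {
		p -= 2 * sizeof(uint32_t);
		memcpy(&out->pid, p, sizeof(uint32_t));
		memcpy(&out->tid, p + sizeof(uint32_t), sizeof(uint32_t));
	}
}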
@@ -4462,20 +4462,6 @@ void perf_output_sample(struct perf_output_handle *handle,
		}
	}

	if (!event->attr.watermark) {
		int wakeup_events = event->attr.wakeup_events;

		if (wakeup_events) {
			struct ring_buffer *rb = handle->rb;
			int events = local_inc_return(&rb->events);

			if (events >= wakeup_events) {
				local_sub(wakeup_events, &rb->events);
				local_inc(&rb->wakeup);
			}
		}
	}

	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
		if (data->br_stack) {
			size_t size;
@@ -4511,16 +4497,31 @@ void perf_output_sample(struct perf_output_handle *handle,
		}
	}

	if (sample_type & PERF_SAMPLE_STACK_USER)
	if (sample_type & PERF_SAMPLE_STACK_USER) {
		perf_output_sample_ustack(handle,
					  data->stack_user_size,
					  data->regs_user.regs);
	}

	if (sample_type & PERF_SAMPLE_WEIGHT)
		perf_output_put(handle, data->weight);

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		perf_output_put(handle, data->data_src.val);

	if (!event->attr.watermark) {
		int wakeup_events = event->attr.wakeup_events;

		if (wakeup_events) {
			struct ring_buffer *rb = handle->rb;
			int events = local_inc_return(&rb->events);

			if (events >= wakeup_events) {
				local_sub(wakeup_events, &rb->events);
				local_inc(&rb->wakeup);
			}
		}
	}
}

void perf_prepare_sample(struct perf_event_header *header,
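
For reference, the two attribute fields involved here are set from userspace: sample_id_all causes the sample_id trailer to be appended to the side-band records documented above, and wakeup_events drives the !attr.watermark accounting that this change moves to the end of perf_output_sample(). A hedged sketch of such a setup (open_sampling_event and all values are illustrative, not from this patch):

/*
 * Illustrative sketch only: open a software sampling event with
 * sample_id_all set, so side-band records (MMAP, COMM, EXIT, ...) carry
 * the sample_id trailer, and with wakeup_events set, so the reader is
 * woken every N events via the !attr.watermark path.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <string.h>
#include <unistd.h>

static long sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
				int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int open_sampling_event(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;
	attr.size = sizeof(attr);
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME |
			   PERF_SAMPLE_ID | PERF_SAMPLE_CPU;
	attr.sample_id_all = 1;		/* append struct sample_id to non-SAMPLE records */
	attr.wakeup_events = 16;	/* with !attr.watermark: wake the reader every 16 events */

	/* pid == 0, cpu == -1: measure the calling process on any CPU */
	return sys_perf_event_open(&attr, 0, -1, -1, 0);
}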