提交 51a763dd 编写于 作者: A Arnaldo Carvalho de Melo 提交者: Ingo Molnar

tracing: Introduce trace_buffer_{lock_reserve,unlock_commit}

Impact: new API

These new functions do what previously was being open coded, reducing
the number of details ftrace plugin writers have to worry about.

It also standardizes the handling of stacktrace, userstacktrace and
other trace options we may introduce in the future.

With this patch, for instance, the blk tracer (and some others already
in the tree) can use the "userstacktrace" /d/tracing/trace_options
facility.

$ codiff /tmp/vmlinux.before /tmp/vmlinux.after
linux-2.6-tip/kernel/trace/trace.c:
  trace_vprintk              |   -5
  trace_graph_return         |  -22
  trace_graph_entry          |  -26
  trace_function             |  -45
  __ftrace_trace_stack       |  -27
  ftrace_trace_userstack     |  -29
  tracing_sched_switch_trace |  -66
  tracing_stop               |   +1
  trace_seq_to_user          |   -1
  ftrace_trace_special       |  -63
  ftrace_special             |   +1
  tracing_sched_wakeup_trace |  -70
  tracing_reset_online_cpus  |   -1
 13 functions changed, 2 bytes added, 355 bytes removed, diff: -353

linux-2.6-tip/block/blktrace.c:
  __blk_add_trace |  -58
 1 function changed, 58 bytes removed, diff: -58

linux-2.6-tip/kernel/trace/trace.c:
  trace_buffer_lock_reserve  |  +88
  trace_buffer_unlock_commit |  +86
 2 functions changed, 174 bytes added, diff: +174

/tmp/vmlinux.after:
 16 functions changed, 176 bytes added, 413 bytes removed, diff: -237
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Acked-by: Frédéric Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
上级 0a987751
...@@ -187,19 +187,15 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, ...@@ -187,19 +187,15 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
cpu = raw_smp_processor_id(); cpu = raw_smp_processor_id();
if (blk_tr) { if (blk_tr) {
struct trace_entry *ent;
tracing_record_cmdline(current); tracing_record_cmdline(current);
event = ring_buffer_lock_reserve(blk_tr->buffer, pc = preempt_count();
sizeof(*t) + pdu_len); event = trace_buffer_lock_reserve(blk_tr, TRACE_BLK,
sizeof(*t) + pdu_len,
0, pc);
if (!event) if (!event)
return; return;
t = ring_buffer_event_data(event);
ent = ring_buffer_event_data(event);
t = (struct blk_io_trace *)ent;
pc = preempt_count();
tracing_generic_entry_update(ent, 0, pc);
ent->type = TRACE_BLK;
goto record_it; goto record_it;
} }
...@@ -241,12 +237,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, ...@@ -241,12 +237,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
memcpy((void *) t + sizeof(*t), pdu_data, pdu_len); memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
if (blk_tr) { if (blk_tr) {
ring_buffer_unlock_commit(blk_tr->buffer, event); trace_buffer_unlock_commit(blk_tr, event, 0, pc);
if (pid != 0 &&
!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC) &&
(trace_flags & TRACE_ITER_STACKTRACE) != 0)
__trace_stack(blk_tr, 0, 5, pc);
trace_wake_up();
return; return;
} }
} }
......
...@@ -276,13 +276,12 @@ void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id, ...@@ -276,13 +276,12 @@ void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
if (!kmem_tracing_enabled) if (!kmem_tracing_enabled)
return; return;
event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry)); event = trace_buffer_lock_reserve(tr, TRACE_KMEM_ALLOC,
sizeof(*entry), 0, 0);
if (!event) if (!event)
return; return;
entry = ring_buffer_event_data(event); entry = ring_buffer_event_data(event);
tracing_generic_entry_update(&entry->ent, 0, 0);
entry->ent.type = TRACE_KMEM_ALLOC;
entry->call_site = call_site; entry->call_site = call_site;
entry->ptr = ptr; entry->ptr = ptr;
entry->bytes_req = bytes_req; entry->bytes_req = bytes_req;
...@@ -290,9 +289,7 @@ void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id, ...@@ -290,9 +289,7 @@ void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
entry->gfp_flags = gfp_flags; entry->gfp_flags = gfp_flags;
entry->node = node; entry->node = node;
ring_buffer_unlock_commit(tr->buffer, event); trace_buffer_unlock_commit(tr, event, 0, 0);
trace_wake_up();
} }
EXPORT_SYMBOL(kmemtrace_mark_alloc_node); EXPORT_SYMBOL(kmemtrace_mark_alloc_node);
...@@ -307,20 +304,16 @@ void kmemtrace_mark_free(enum kmemtrace_type_id type_id, ...@@ -307,20 +304,16 @@ void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
if (!kmem_tracing_enabled) if (!kmem_tracing_enabled)
return; return;
event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry)); event = trace_buffer_lock_reserve(tr, TRACE_KMEM_FREE,
sizeof(*entry), 0, 0);
if (!event) if (!event)
return; return;
entry = ring_buffer_event_data(event); entry = ring_buffer_event_data(event);
tracing_generic_entry_update(&entry->ent, 0, 0);
entry->ent.type = TRACE_KMEM_FREE;
entry->type_id = type_id; entry->type_id = type_id;
entry->call_site = call_site; entry->call_site = call_site;
entry->ptr = ptr; entry->ptr = ptr;
ring_buffer_unlock_commit(tr->buffer, event); trace_buffer_unlock_commit(tr, event, 0, 0);
trace_wake_up();
} }
EXPORT_SYMBOL(kmemtrace_mark_free); EXPORT_SYMBOL(kmemtrace_mark_free);
......
...@@ -776,6 +776,39 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, ...@@ -776,6 +776,39 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0); (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
} }
/*
 * trace_buffer_lock_reserve - reserve and pre-initialize a trace event
 * @tr:    trace array whose ring buffer receives the event
 * @type:  trace entry type tag (e.g. TRACE_FN, TRACE_BLK)
 * @len:   total length in bytes to reserve for the event payload
 * @flags: irq flags recorded into the generic entry header
 * @pc:    preempt count recorded into the generic entry header
 *
 * Wraps ring_buffer_lock_reserve() and, on success, fills in the common
 * trace_entry header (pid, flags, preempt count) and the entry type, so
 * individual tracers no longer open-code those steps.
 *
 * Returns the reserved event, or NULL if the ring buffer had no room.
 * The caller must pair a successful reservation with a commit.
 */
struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
unsigned char type,
unsigned long len,
unsigned long flags, int pc)
{
struct ring_buffer_event *event;
event = ring_buffer_lock_reserve(tr->buffer, len);
if (event != NULL) {
/* Initialize the common header shared by all trace entry types. */
struct trace_entry *ent = ring_buffer_event_data(event);
tracing_generic_entry_update(ent, flags, pc);
ent->type = type;
}
return event;
}
/* Forward declarations: defined later in this file, needed by the commit
 * helper below so stack/userstack recording honors the trace options. */
static void ftrace_trace_stack(struct trace_array *tr,
unsigned long flags, int skip, int pc);
static void ftrace_trace_userstack(struct trace_array *tr,
unsigned long flags, int pc);
/*
 * trace_buffer_unlock_commit - commit a reserved event and run post-commit work
 * @tr:    trace array whose ring buffer holds the event
 * @event: event previously returned by trace_buffer_lock_reserve()
 * @flags: irq flags passed through to the stack tracers
 * @pc:    preempt count passed through to the stack tracers
 *
 * Commits the event, then centrally handles the "stacktrace" and
 * "userstacktrace" trace options and wakes up any readers waiting on
 * the trace buffer — work each tracer previously open-coded.
 */
void trace_buffer_unlock_commit(struct trace_array *tr,
struct ring_buffer_event *event,
unsigned long flags, int pc)
{
ring_buffer_unlock_commit(tr->buffer, event);
/* skip=6 drops the commit-path frames from the recorded kernel stack */
ftrace_trace_stack(tr, flags, 6, pc);
ftrace_trace_userstack(tr, flags, pc);
trace_wake_up();
}
void void
trace_function(struct trace_array *tr, trace_function(struct trace_array *tr,
unsigned long ip, unsigned long parent_ip, unsigned long flags, unsigned long ip, unsigned long parent_ip, unsigned long flags,
...@@ -788,12 +821,11 @@ trace_function(struct trace_array *tr, ...@@ -788,12 +821,11 @@ trace_function(struct trace_array *tr,
if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
return; return;
event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry)); event = trace_buffer_lock_reserve(tr, TRACE_FN, sizeof(*entry),
flags, pc);
if (!event) if (!event)
return; return;
entry = ring_buffer_event_data(event); entry = ring_buffer_event_data(event);
tracing_generic_entry_update(&entry->ent, flags, pc);
entry->ent.type = TRACE_FN;
entry->ip = ip; entry->ip = ip;
entry->parent_ip = parent_ip; entry->parent_ip = parent_ip;
ring_buffer_unlock_commit(tr->buffer, event); ring_buffer_unlock_commit(tr->buffer, event);
...@@ -811,12 +843,11 @@ static void __trace_graph_entry(struct trace_array *tr, ...@@ -811,12 +843,11 @@ static void __trace_graph_entry(struct trace_array *tr,
if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
return; return;
event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry)); event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_ENT,
sizeof(*entry), flags, pc);
if (!event) if (!event)
return; return;
entry = ring_buffer_event_data(event); entry = ring_buffer_event_data(event);
tracing_generic_entry_update(&entry->ent, flags, pc);
entry->ent.type = TRACE_GRAPH_ENT;
entry->graph_ent = *trace; entry->graph_ent = *trace;
ring_buffer_unlock_commit(global_trace.buffer, event); ring_buffer_unlock_commit(global_trace.buffer, event);
} }
...@@ -832,12 +863,11 @@ static void __trace_graph_return(struct trace_array *tr, ...@@ -832,12 +863,11 @@ static void __trace_graph_return(struct trace_array *tr,
if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
return; return;
event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry)); event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_RET,
sizeof(*entry), flags, pc);
if (!event) if (!event)
return; return;
entry = ring_buffer_event_data(event); entry = ring_buffer_event_data(event);
tracing_generic_entry_update(&entry->ent, flags, pc);
entry->ent.type = TRACE_GRAPH_RET;
entry->ret = *trace; entry->ret = *trace;
ring_buffer_unlock_commit(global_trace.buffer, event); ring_buffer_unlock_commit(global_trace.buffer, event);
} }
...@@ -861,13 +891,11 @@ static void __ftrace_trace_stack(struct trace_array *tr, ...@@ -861,13 +891,11 @@ static void __ftrace_trace_stack(struct trace_array *tr,
struct stack_entry *entry; struct stack_entry *entry;
struct stack_trace trace; struct stack_trace trace;
event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry)); event = trace_buffer_lock_reserve(tr, TRACE_STACK,
sizeof(*entry), flags, pc);
if (!event) if (!event)
return; return;
entry = ring_buffer_event_data(event); entry = ring_buffer_event_data(event);
tracing_generic_entry_update(&entry->ent, flags, pc);
entry->ent.type = TRACE_STACK;
memset(&entry->caller, 0, sizeof(entry->caller)); memset(&entry->caller, 0, sizeof(entry->caller));
trace.nr_entries = 0; trace.nr_entries = 0;
...@@ -908,12 +936,11 @@ static void ftrace_trace_userstack(struct trace_array *tr, ...@@ -908,12 +936,11 @@ static void ftrace_trace_userstack(struct trace_array *tr,
if (!(trace_flags & TRACE_ITER_USERSTACKTRACE)) if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
return; return;
event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry)); event = trace_buffer_lock_reserve(tr, TRACE_USER_STACK,
sizeof(*entry), flags, pc);
if (!event) if (!event)
return; return;
entry = ring_buffer_event_data(event); entry = ring_buffer_event_data(event);
tracing_generic_entry_update(&entry->ent, flags, pc);
entry->ent.type = TRACE_USER_STACK;
memset(&entry->caller, 0, sizeof(entry->caller)); memset(&entry->caller, 0, sizeof(entry->caller));
...@@ -941,20 +968,15 @@ ftrace_trace_special(void *__tr, ...@@ -941,20 +968,15 @@ ftrace_trace_special(void *__tr,
struct trace_array *tr = __tr; struct trace_array *tr = __tr;
struct special_entry *entry; struct special_entry *entry;
event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry)); event = trace_buffer_lock_reserve(tr, TRACE_SPECIAL,
sizeof(*entry), 0, pc);
if (!event) if (!event)
return; return;
entry = ring_buffer_event_data(event); entry = ring_buffer_event_data(event);
tracing_generic_entry_update(&entry->ent, 0, pc);
entry->ent.type = TRACE_SPECIAL;
entry->arg1 = arg1; entry->arg1 = arg1;
entry->arg2 = arg2; entry->arg2 = arg2;
entry->arg3 = arg3; entry->arg3 = arg3;
ring_buffer_unlock_commit(tr->buffer, event); trace_buffer_unlock_commit(tr, event, 0, pc);
ftrace_trace_stack(tr, 0, 4, pc);
ftrace_trace_userstack(tr, 0, pc);
trace_wake_up();
} }
void void
...@@ -973,12 +995,11 @@ tracing_sched_switch_trace(struct trace_array *tr, ...@@ -973,12 +995,11 @@ tracing_sched_switch_trace(struct trace_array *tr,
struct ring_buffer_event *event; struct ring_buffer_event *event;
struct ctx_switch_entry *entry; struct ctx_switch_entry *entry;
event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry)); event = trace_buffer_lock_reserve(tr, TRACE_CTX,
sizeof(*entry), flags, pc);
if (!event) if (!event)
return; return;
entry = ring_buffer_event_data(event); entry = ring_buffer_event_data(event);
tracing_generic_entry_update(&entry->ent, flags, pc);
entry->ent.type = TRACE_CTX;
entry->prev_pid = prev->pid; entry->prev_pid = prev->pid;
entry->prev_prio = prev->prio; entry->prev_prio = prev->prio;
entry->prev_state = prev->state; entry->prev_state = prev->state;
...@@ -986,9 +1007,7 @@ tracing_sched_switch_trace(struct trace_array *tr, ...@@ -986,9 +1007,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
entry->next_prio = next->prio; entry->next_prio = next->prio;
entry->next_state = next->state; entry->next_state = next->state;
entry->next_cpu = task_cpu(next); entry->next_cpu = task_cpu(next);
ring_buffer_unlock_commit(tr->buffer, event); trace_buffer_unlock_commit(tr, event, flags, pc);
ftrace_trace_stack(tr, flags, 5, pc);
ftrace_trace_userstack(tr, flags, pc);
} }
void void
...@@ -1000,12 +1019,11 @@ tracing_sched_wakeup_trace(struct trace_array *tr, ...@@ -1000,12 +1019,11 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
struct ring_buffer_event *event; struct ring_buffer_event *event;
struct ctx_switch_entry *entry; struct ctx_switch_entry *entry;
event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry)); event = trace_buffer_lock_reserve(tr, TRACE_WAKE,
sizeof(*entry), flags, pc);
if (!event) if (!event)
return; return;
entry = ring_buffer_event_data(event); entry = ring_buffer_event_data(event);
tracing_generic_entry_update(&entry->ent, flags, pc);
entry->ent.type = TRACE_WAKE;
entry->prev_pid = curr->pid; entry->prev_pid = curr->pid;
entry->prev_prio = curr->prio; entry->prev_prio = curr->prio;
entry->prev_state = curr->state; entry->prev_state = curr->state;
...@@ -1013,11 +1031,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr, ...@@ -1013,11 +1031,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
entry->next_prio = wakee->prio; entry->next_prio = wakee->prio;
entry->next_state = wakee->state; entry->next_state = wakee->state;
entry->next_cpu = task_cpu(wakee); entry->next_cpu = task_cpu(wakee);
ring_buffer_unlock_commit(tr->buffer, event); trace_buffer_unlock_commit(tr, event, flags, pc);
ftrace_trace_stack(tr, flags, 6, pc);
ftrace_trace_userstack(tr, flags, pc);
trace_wake_up();
} }
void void
...@@ -2825,12 +2839,10 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args) ...@@ -2825,12 +2839,10 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
trace_buf[len] = 0; trace_buf[len] = 0;
size = sizeof(*entry) + len + 1; size = sizeof(*entry) + len + 1;
event = ring_buffer_lock_reserve(tr->buffer, size); event = trace_buffer_lock_reserve(tr, TRACE_PRINT, size, irq_flags, pc);
if (!event) if (!event)
goto out_unlock; goto out_unlock;
entry = ring_buffer_event_data(event); entry = ring_buffer_event_data(event);
tracing_generic_entry_update(&entry->ent, irq_flags, pc);
entry->ent.type = TRACE_PRINT;
entry->ip = ip; entry->ip = ip;
entry->depth = depth; entry->depth = depth;
......
...@@ -403,6 +403,17 @@ int tracing_open_generic(struct inode *inode, struct file *filp); ...@@ -403,6 +403,17 @@ int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *tracing_init_dentry(void); struct dentry *tracing_init_dentry(void);
void init_tracer_sysprof_debugfs(struct dentry *d_tracer); void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
struct ring_buffer_event;
struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
unsigned char type,
unsigned long len,
unsigned long flags,
int pc);
void trace_buffer_unlock_commit(struct trace_array *tr,
struct ring_buffer_event *event,
unsigned long flags, int pc);
struct trace_entry *tracing_get_trace_entry(struct trace_array *tr, struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
struct trace_array_cpu *data); struct trace_array_cpu *data);
......
...@@ -143,17 +143,13 @@ void trace_boot_call(struct boot_trace_call *bt, initcall_t fn) ...@@ -143,17 +143,13 @@ void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
sprint_symbol(bt->func, (unsigned long)fn); sprint_symbol(bt->func, (unsigned long)fn);
preempt_disable(); preempt_disable();
event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry)); event = trace_buffer_lock_reserve(tr, TRACE_BOOT_CALL,
sizeof(*entry), 0, 0);
if (!event) if (!event)
goto out; goto out;
entry = ring_buffer_event_data(event); entry = ring_buffer_event_data(event);
tracing_generic_entry_update(&entry->ent, 0, 0);
entry->ent.type = TRACE_BOOT_CALL;
entry->boot_call = *bt; entry->boot_call = *bt;
ring_buffer_unlock_commit(tr->buffer, event); trace_buffer_unlock_commit(tr, event, 0, 0);
trace_wake_up();
out: out:
preempt_enable(); preempt_enable();
} }
...@@ -170,17 +166,13 @@ void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn) ...@@ -170,17 +166,13 @@ void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
sprint_symbol(bt->func, (unsigned long)fn); sprint_symbol(bt->func, (unsigned long)fn);
preempt_disable(); preempt_disable();
event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry)); event = trace_buffer_lock_reserve(tr, TRACE_BOOT_RET,
sizeof(*entry), 0, 0);
if (!event) if (!event)
goto out; goto out;
entry = ring_buffer_event_data(event); entry = ring_buffer_event_data(event);
tracing_generic_entry_update(&entry->ent, 0, 0);
entry->ent.type = TRACE_BOOT_RET;
entry->boot_ret = *bt; entry->boot_ret = *bt;
ring_buffer_unlock_commit(tr->buffer, event); trace_buffer_unlock_commit(tr, event, 0, 0);
trace_wake_up();
out: out:
preempt_enable(); preempt_enable();
} }
...@@ -52,14 +52,13 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect) ...@@ -52,14 +52,13 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
if (atomic_inc_return(&tr->data[cpu]->disabled) != 1) if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
goto out; goto out;
event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry)); pc = preempt_count();
event = trace_buffer_lock_reserve(tr, TRACE_BRANCH,
sizeof(*entry), flags, pc);
if (!event) if (!event)
goto out; goto out;
pc = preempt_count();
entry = ring_buffer_event_data(event); entry = ring_buffer_event_data(event);
tracing_generic_entry_update(&entry->ent, flags, pc);
entry->ent.type = TRACE_BRANCH;
/* Strip off the path, only save the file */ /* Strip off the path, only save the file */
p = f->file + strlen(f->file); p = f->file + strlen(f->file);
......
...@@ -189,16 +189,15 @@ void trace_hw_branch(u64 from, u64 to) ...@@ -189,16 +189,15 @@ void trace_hw_branch(u64 from, u64 to)
if (atomic_inc_return(&tr->data[cpu]->disabled) != 1) if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
goto out; goto out;
event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry)); event = trace_buffer_lock_reserve(tr, TRACE_HW_BRANCHES,
sizeof(*entry), 0, 0);
if (!event) if (!event)
goto out; goto out;
entry = ring_buffer_event_data(event); entry = ring_buffer_event_data(event);
tracing_generic_entry_update(&entry->ent, 0, from);
entry->ent.type = TRACE_HW_BRANCHES;
entry->ent.cpu = cpu; entry->ent.cpu = cpu;
entry->from = from; entry->from = from;
entry->to = to; entry->to = to;
ring_buffer_unlock_commit(tr->buffer, event); trace_buffer_unlock_commit(tr, event, 0, 0);
out: out:
atomic_dec(&tr->data[cpu]->disabled); atomic_dec(&tr->data[cpu]->disabled);
......
...@@ -307,19 +307,17 @@ static void __trace_mmiotrace_rw(struct trace_array *tr, ...@@ -307,19 +307,17 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
{ {
struct ring_buffer_event *event; struct ring_buffer_event *event;
struct trace_mmiotrace_rw *entry; struct trace_mmiotrace_rw *entry;
int pc = preempt_count();
event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry)); event = trace_buffer_lock_reserve(tr, TRACE_MMIO_RW,
sizeof(*entry), 0, pc);
if (!event) { if (!event) {
atomic_inc(&dropped_count); atomic_inc(&dropped_count);
return; return;
} }
entry = ring_buffer_event_data(event); entry = ring_buffer_event_data(event);
tracing_generic_entry_update(&entry->ent, 0, preempt_count());
entry->ent.type = TRACE_MMIO_RW;
entry->rw = *rw; entry->rw = *rw;
ring_buffer_unlock_commit(tr->buffer, event); trace_buffer_unlock_commit(tr, event, 0, pc);
trace_wake_up();
} }
void mmio_trace_rw(struct mmiotrace_rw *rw) void mmio_trace_rw(struct mmiotrace_rw *rw)
...@@ -335,19 +333,17 @@ static void __trace_mmiotrace_map(struct trace_array *tr, ...@@ -335,19 +333,17 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
{ {
struct ring_buffer_event *event; struct ring_buffer_event *event;
struct trace_mmiotrace_map *entry; struct trace_mmiotrace_map *entry;
int pc = preempt_count();
event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry)); event = trace_buffer_lock_reserve(tr, TRACE_MMIO_MAP,
sizeof(*entry), 0, pc);
if (!event) { if (!event) {
atomic_inc(&dropped_count); atomic_inc(&dropped_count);
return; return;
} }
entry = ring_buffer_event_data(event); entry = ring_buffer_event_data(event);
tracing_generic_entry_update(&entry->ent, 0, preempt_count());
entry->ent.type = TRACE_MMIO_MAP;
entry->map = *map; entry->map = *map;
ring_buffer_unlock_commit(tr->buffer, event); trace_buffer_unlock_commit(tr, event, 0, pc);
trace_wake_up();
} }
void mmio_trace_mapping(struct mmiotrace_map *map) void mmio_trace_mapping(struct mmiotrace_map *map)
......
...@@ -124,17 +124,13 @@ void trace_power_end(struct power_trace *it) ...@@ -124,17 +124,13 @@ void trace_power_end(struct power_trace *it)
it->end = ktime_get(); it->end = ktime_get();
data = tr->data[smp_processor_id()]; data = tr->data[smp_processor_id()];
event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry)); event = trace_buffer_lock_reserve(tr, TRACE_POWER,
sizeof(*entry), 0, 0);
if (!event) if (!event)
goto out; goto out;
entry = ring_buffer_event_data(event); entry = ring_buffer_event_data(event);
tracing_generic_entry_update(&entry->ent, 0, 0);
entry->ent.type = TRACE_POWER;
entry->state_data = *it; entry->state_data = *it;
ring_buffer_unlock_commit(tr->buffer, event); trace_buffer_unlock_commit(tr, event, 0, 0);
trace_wake_up();
out: out:
preempt_enable(); preempt_enable();
} }
...@@ -159,17 +155,13 @@ void trace_power_mark(struct power_trace *it, unsigned int type, ...@@ -159,17 +155,13 @@ void trace_power_mark(struct power_trace *it, unsigned int type,
it->end = it->stamp; it->end = it->stamp;
data = tr->data[smp_processor_id()]; data = tr->data[smp_processor_id()];
event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry)); event = trace_buffer_lock_reserve(tr, TRACE_POWER,
sizeof(*entry), 0, 0);
if (!event) if (!event)
goto out; goto out;
entry = ring_buffer_event_data(event); entry = ring_buffer_event_data(event);
tracing_generic_entry_update(&entry->ent, 0, 0);
entry->ent.type = TRACE_POWER;
entry->state_data = *it; entry->state_data = *it;
ring_buffer_unlock_commit(tr->buffer, event); trace_buffer_unlock_commit(tr, event, 0, 0);
trace_wake_up();
out: out:
preempt_enable(); preempt_enable();
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册