Commit 0a987751 authored by Arnaldo Carvalho de Melo, committed by Ingo Molnar

ring_buffer: remove unused flags parameter

Impact: API change, cleanup

From ring_buffer_{lock_reserve,unlock_commit}.

$ codiff /tmp/vmlinux.before /tmp/vmlinux.after
linux-2.6-tip/kernel/trace/trace.c:
  trace_vprintk              |  -14
  trace_graph_return         |  -14
  trace_graph_entry          |  -10
  trace_function             |   -8
  __ftrace_trace_stack       |   -8
  ftrace_trace_userstack     |   -8
  tracing_sched_switch_trace |   -8
  ftrace_trace_special       |  -12
  tracing_sched_wakeup_trace |   -8
 9 functions changed, 90 bytes removed, diff: -90

linux-2.6-tip/block/blktrace.c:
  __blk_add_trace |   -1
 1 function changed, 1 bytes removed, diff: -1

/tmp/vmlinux.after:
 10 functions changed, 91 bytes removed, diff: -91
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Acked-by: Frédéric Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent dac74940
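For reference, a minimal sketch of the caller-side pattern after this change. It is not part of the commit: the struct my_entry payload and my_trace_write() wrapper are hypothetical names, and the ring buffer is assumed to have been created elsewhere with ring_buffer_alloc(). The only difference from the old API is that no interrupt-flags variable is threaded through reserve and commit:

    #include <linux/ring_buffer.h>

    /* Hypothetical event payload, for illustration only. */
    struct my_entry {
            unsigned long ip;
    };

    static void my_trace_write(struct ring_buffer *buffer, unsigned long ip)
    {
            struct ring_buffer_event *event;
            struct my_entry *entry;

            /* Old API: ring_buffer_lock_reserve(buffer, sizeof(*entry), &irq_flags) */
            event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
            if (!event)
                    return; /* nothing was reserved or locked */

            entry = ring_buffer_event_data(event);
            entry->ip = ip;

            /* Old API: ring_buffer_unlock_commit(buffer, event, irq_flags) */
            ring_buffer_unlock_commit(buffer, event);
    }

Every writer touched by the diff below follows this reserve/fill/commit shape; dropping the threaded flags argument (and, in most callers, the now-unused irq_flags stack slot) is what produces the small per-function size reductions reported by codiff above.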
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -165,7 +165,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 	struct task_struct *tsk = current;
 	struct ring_buffer_event *event = NULL;
 	struct blk_io_trace *t;
-	unsigned long flags;
+	unsigned long flags = 0;
 	unsigned long *sequence;
 	pid_t pid;
 	int cpu, pc = 0;
@@ -191,7 +191,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 		tracing_record_cmdline(current);
 
 		event = ring_buffer_lock_reserve(blk_tr->buffer,
-						 sizeof(*t) + pdu_len, &flags);
+						 sizeof(*t) + pdu_len);
 		if (!event)
 			return;
@@ -241,11 +241,11 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 	memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
 
 	if (blk_tr) {
-		ring_buffer_unlock_commit(blk_tr->buffer, event, flags);
+		ring_buffer_unlock_commit(blk_tr->buffer, event);
 		if (pid != 0 &&
 		    !(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC) &&
 		    (trace_flags & TRACE_ITER_STACKTRACE) != 0)
-			__trace_stack(blk_tr, flags, 5, pc);
+			__trace_stack(blk_tr, 0, 5, pc);
 		trace_wake_up();
 		return;
 	}
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -74,13 +74,10 @@ void ring_buffer_free(struct ring_buffer *buffer);
 
 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size);
 
-struct ring_buffer_event *
-ring_buffer_lock_reserve(struct ring_buffer *buffer,
-			 unsigned long length,
-			 unsigned long *flags);
+struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer,
+						   unsigned long length);
 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
-			      struct ring_buffer_event *event,
-			      unsigned long flags);
+			      struct ring_buffer_event *event);
 int ring_buffer_write(struct ring_buffer *buffer,
 		      unsigned long length, void *data);
--- a/kernel/trace/kmemtrace.c
+++ b/kernel/trace/kmemtrace.c
@@ -272,13 +272,11 @@ void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
 	struct ring_buffer_event *event;
 	struct kmemtrace_alloc_entry *entry;
 	struct trace_array *tr = kmemtrace_array;
-	unsigned long irq_flags;
 
 	if (!kmem_tracing_enabled)
 		return;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
@@ -292,7 +290,7 @@ void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
 	entry->gfp_flags = gfp_flags;
 	entry->node = node;
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ring_buffer_unlock_commit(tr->buffer, event);
 
 	trace_wake_up();
 }
@@ -305,13 +303,11 @@ void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
 	struct ring_buffer_event *event;
 	struct kmemtrace_free_entry *entry;
 	struct trace_array *tr = kmemtrace_array;
-	unsigned long irq_flags;
 
 	if (!kmem_tracing_enabled)
 		return;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
@@ -322,7 +318,7 @@ void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
 	entry->call_site = call_site;
 	entry->ptr = ptr;
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ring_buffer_unlock_commit(tr->buffer, event);
 
 	trace_wake_up();
 }
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1257,7 +1257,6 @@ static DEFINE_PER_CPU(int, rb_need_resched);
  * ring_buffer_lock_reserve - reserve a part of the buffer
  * @buffer: the ring buffer to reserve from
  * @length: the length of the data to reserve (excluding event header)
- * @flags: a pointer to save the interrupt flags
  *
  * Returns a reseverd event on the ring buffer to copy directly to.
  * The user of this interface will need to get the body to write into
@@ -1270,9 +1269,7 @@ static DEFINE_PER_CPU(int, rb_need_resched);
  * If NULL is returned, then nothing has been allocated or locked.
  */
 struct ring_buffer_event *
-ring_buffer_lock_reserve(struct ring_buffer *buffer,
-			 unsigned long length,
-			 unsigned long *flags)
+ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event;
@@ -1339,15 +1336,13 @@ static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
  * ring_buffer_unlock_commit - commit a reserved
  * @buffer: The buffer to commit to
  * @event: The event pointer to commit.
- * @flags: the interrupt flags received from ring_buffer_lock_reserve.
  *
  * This commits the data to the ring buffer, and releases any locks held.
  *
  * Must be paired with ring_buffer_lock_reserve.
  */
 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
-			      struct ring_buffer_event *event,
-			      unsigned long flags)
+			      struct ring_buffer_event *event)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	int cpu = raw_smp_processor_id();
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -783,14 +783,12 @@ trace_function(struct trace_array *tr,
 {
 	struct ring_buffer_event *event;
 	struct ftrace_entry *entry;
-	unsigned long irq_flags;
 
 	/* If we are reading the ring buffer, don't trace */
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
 		return;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
@@ -798,7 +796,7 @@ trace_function(struct trace_array *tr,
 	entry->ent.type = TRACE_FN;
 	entry->ip = ip;
 	entry->parent_ip = parent_ip;
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ring_buffer_unlock_commit(tr->buffer, event);
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -809,20 +807,18 @@ static void __trace_graph_entry(struct trace_array *tr,
 {
 	struct ring_buffer_event *event;
 	struct ftrace_graph_ent_entry *entry;
-	unsigned long irq_flags;
 
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
 		return;
 
-	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry));
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, flags, pc);
 	entry->ent.type = TRACE_GRAPH_ENT;
 	entry->graph_ent = *trace;
-	ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
+	ring_buffer_unlock_commit(global_trace.buffer, event);
 }
 
 static void __trace_graph_return(struct trace_array *tr,
@@ -832,20 +828,18 @@ static void __trace_graph_return(struct trace_array *tr,
 {
 	struct ring_buffer_event *event;
 	struct ftrace_graph_ret_entry *entry;
-	unsigned long irq_flags;
 
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
 		return;
 
-	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry));
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, flags, pc);
 	entry->ent.type = TRACE_GRAPH_RET;
 	entry->ret = *trace;
-	ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
+	ring_buffer_unlock_commit(global_trace.buffer, event);
 }
 #endif
@@ -866,10 +860,8 @@ static void __ftrace_trace_stack(struct trace_array *tr,
 	struct ring_buffer_event *event;
 	struct stack_entry *entry;
 	struct stack_trace trace;
-	unsigned long irq_flags;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
@@ -884,7 +876,7 @@ static void __ftrace_trace_stack(struct trace_array *tr,
 	trace.entries = entry->caller;
 
 	save_stack_trace(&trace);
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ring_buffer_unlock_commit(tr->buffer, event);
 #endif
 }
@@ -912,13 +904,11 @@ static void ftrace_trace_userstack(struct trace_array *tr,
 	struct ring_buffer_event *event;
 	struct userstack_entry *entry;
 	struct stack_trace trace;
-	unsigned long irq_flags;
 
 	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
 		return;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
@@ -933,7 +923,7 @@ static void ftrace_trace_userstack(struct trace_array *tr,
 	trace.entries = entry->caller;
 
 	save_stack_trace_user(&trace);
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ring_buffer_unlock_commit(tr->buffer, event);
 #endif
 }
@@ -950,10 +940,8 @@ ftrace_trace_special(void *__tr,
 	struct ring_buffer_event *event;
 	struct trace_array *tr = __tr;
 	struct special_entry *entry;
-	unsigned long irq_flags;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
@@ -962,9 +950,9 @@ ftrace_trace_special(void *__tr,
 	entry->arg1 = arg1;
 	entry->arg2 = arg2;
 	entry->arg3 = arg3;
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-	ftrace_trace_stack(tr, irq_flags, 4, pc);
-	ftrace_trace_userstack(tr, irq_flags, pc);
+	ring_buffer_unlock_commit(tr->buffer, event);
+	ftrace_trace_stack(tr, 0, 4, pc);
+	ftrace_trace_userstack(tr, 0, pc);
 
 	trace_wake_up();
 }
@@ -984,10 +972,8 @@ tracing_sched_switch_trace(struct trace_array *tr,
 {
 	struct ring_buffer_event *event;
 	struct ctx_switch_entry *entry;
-	unsigned long irq_flags;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
@@ -1000,7 +986,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
 	entry->next_prio = next->prio;
 	entry->next_state = next->state;
 	entry->next_cpu = task_cpu(next);
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ring_buffer_unlock_commit(tr->buffer, event);
 	ftrace_trace_stack(tr, flags, 5, pc);
 	ftrace_trace_userstack(tr, flags, pc);
 }
@@ -1013,10 +999,8 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 {
 	struct ring_buffer_event *event;
 	struct ctx_switch_entry *entry;
-	unsigned long irq_flags;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
@@ -1029,7 +1013,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 	entry->next_prio = wakee->prio;
 	entry->next_state = wakee->state;
 	entry->next_cpu = task_cpu(wakee);
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ring_buffer_unlock_commit(tr->buffer, event);
 	ftrace_trace_stack(tr, flags, 6, pc);
 	ftrace_trace_userstack(tr, flags, pc);
@@ -2841,7 +2825,7 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 	trace_buf[len] = 0;
 
 	size = sizeof(*entry) + len + 1;
-	event = ring_buffer_lock_reserve(tr->buffer, size, &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, size);
 	if (!event)
 		goto out_unlock;
@@ -2852,7 +2836,7 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 	memcpy(&entry->buf, trace_buf, len);
 	entry->buf[len] = 0;
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ring_buffer_unlock_commit(tr->buffer, event);
 
  out_unlock:
 	spin_unlock_irqrestore(&trace_buf_lock, irq_flags);
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -132,7 +132,6 @@ void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
 {
 	struct ring_buffer_event *event;
 	struct trace_boot_call *entry;
-	unsigned long irq_flags;
 	struct trace_array *tr = boot_trace;
 
 	if (!tr || !pre_initcalls_finished)
@@ -144,15 +143,14 @@ void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
 	sprint_symbol(bt->func, (unsigned long)fn);
 	preempt_disable();
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
 	if (!event)
 		goto out;
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, 0, 0);
 	entry->ent.type = TRACE_BOOT_CALL;
 	entry->boot_call = *bt;
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ring_buffer_unlock_commit(tr->buffer, event);
 
 	trace_wake_up();
@@ -164,7 +162,6 @@ void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
 {
 	struct ring_buffer_event *event;
 	struct trace_boot_ret *entry;
-	unsigned long irq_flags;
 	struct trace_array *tr = boot_trace;
 
 	if (!tr || !pre_initcalls_finished)
@@ -173,15 +170,14 @@ void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
 	sprint_symbol(bt->func, (unsigned long)fn);
 	preempt_disable();
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
 	if (!event)
 		goto out;
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, 0, 0);
 	entry->ent.type = TRACE_BOOT_RET;
 	entry->boot_ret = *bt;
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ring_buffer_unlock_commit(tr->buffer, event);
 
 	trace_wake_up();
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -33,7 +33,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 	struct trace_array *tr = branch_tracer;
 	struct ring_buffer_event *event;
 	struct trace_branch *entry;
-	unsigned long flags, irq_flags;
+	unsigned long flags;
 	int cpu, pc;
 	const char *p;
@@ -52,8 +52,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
 		goto out;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
 	if (!event)
 		goto out;
@@ -75,7 +74,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 	entry->line = f->line;
 	entry->correct = val == expect;
 
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ring_buffer_unlock_commit(tr->buffer, event);
 
  out:
 	atomic_dec(&tr->data[cpu]->disabled);
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -175,7 +175,7 @@ void trace_hw_branch(u64 from, u64 to)
 	struct trace_array *tr = hw_branch_trace;
 	struct ring_buffer_event *event;
 	struct hw_branch_entry *entry;
-	unsigned long irq1, irq2;
+	unsigned long irq1;
 	int cpu;
 
 	if (unlikely(!tr))
@@ -189,7 +189,7 @@ void trace_hw_branch(u64 from, u64 to)
 	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
 		goto out;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq2);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
 	if (!event)
 		goto out;
 	entry = ring_buffer_event_data(event);
@@ -198,7 +198,7 @@ void trace_hw_branch(u64 from, u64 to)
 	entry->ent.cpu = cpu;
 	entry->from = from;
 	entry->to = to;
-	ring_buffer_unlock_commit(tr->buffer, event, irq2);
+	ring_buffer_unlock_commit(tr->buffer, event);
 
  out:
 	atomic_dec(&tr->data[cpu]->disabled);
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -307,10 +307,8 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 {
 	struct ring_buffer_event *event;
 	struct trace_mmiotrace_rw *entry;
-	unsigned long irq_flags;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
 	if (!event) {
 		atomic_inc(&dropped_count);
 		return;
@@ -319,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 	tracing_generic_entry_update(&entry->ent, 0, preempt_count());
 	entry->ent.type = TRACE_MMIO_RW;
 	entry->rw = *rw;
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ring_buffer_unlock_commit(tr->buffer, event);
 
 	trace_wake_up();
 }
@@ -337,10 +335,8 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
 {
 	struct ring_buffer_event *event;
 	struct trace_mmiotrace_map *entry;
-	unsigned long irq_flags;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
 	if (!event) {
 		atomic_inc(&dropped_count);
 		return;
@@ -349,7 +345,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
 	tracing_generic_entry_update(&entry->ent, 0, preempt_count());
 	entry->ent.type = TRACE_MMIO_MAP;
 	entry->map = *map;
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ring_buffer_unlock_commit(tr->buffer, event);
 
 	trace_wake_up();
 }
--- a/kernel/trace/trace_power.c
+++ b/kernel/trace/trace_power.c
@@ -115,7 +115,6 @@ void trace_power_end(struct power_trace *it)
 	struct ring_buffer_event *event;
 	struct trace_power *entry;
 	struct trace_array_cpu *data;
-	unsigned long irq_flags;
 	struct trace_array *tr = power_trace;
 
 	if (!trace_power_enabled)
@@ -125,15 +124,14 @@ void trace_power_end(struct power_trace *it)
 	it->end = ktime_get();
 	data = tr->data[smp_processor_id()];
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
 	if (!event)
 		goto out;
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, 0, 0);
 	entry->ent.type = TRACE_POWER;
 	entry->state_data = *it;
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ring_buffer_unlock_commit(tr->buffer, event);
 
 	trace_wake_up();
@@ -148,7 +146,6 @@ void trace_power_mark(struct power_trace *it, unsigned int type,
 	struct ring_buffer_event *event;
 	struct trace_power *entry;
 	struct trace_array_cpu *data;
-	unsigned long irq_flags;
 	struct trace_array *tr = power_trace;
 
 	if (!trace_power_enabled)
@@ -162,15 +159,14 @@ void trace_power_mark(struct power_trace *it, unsigned int type,
 	it->end = it->stamp;
 	data = tr->data[smp_processor_id()];
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
 	if (!event)
 		goto out;
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, 0, 0);
 	entry->ent.type = TRACE_POWER;
 	entry->state_data = *it;
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ring_buffer_unlock_commit(tr->buffer, event);
 
 	trace_wake_up();