Commit 32c0edae authored by Steven Rostedt and committed by Steven Rostedt

tracing: Remove duplicate id information in event structure

Now that the trace_event structure is embedded in the ftrace_event_call
structure, there is no need for the ftrace_event_call id field.
The id field is the same as the trace_event type field.

Removing the id and re-arranging the structure brings down the tracepoint
footprint by another 5K.

   text	   data	    bss	    dec	    hex	filename
4913961	1088356	 861512	6863829	 68bbd5	vmlinux.orig
4895024	1023812	 861512	6780348	 6775bc	vmlinux.print
4894944	1018052	 861512	6774508	 675eec	vmlinux.id
Acked-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Acked-by: Masami Hiramatsu <mhiramat@redhat.com>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Parent 80decc70
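Below is a minimal standalone C sketch (not part of this commit) of the idea behind the change: with struct trace_event embedded in struct ftrace_event_call, the event number already lives in the embedded event's type field, so a separate id member only duplicates it. The structure layouts are heavily simplified, and the helper event_id(), the example name "sched_switch", and the value 42 are made up purely for illustration.

/*
 * Simplified sketch of the structures touched by this commit.  Only the
 * fields relevant to the change are shown; the real kernel definitions
 * have many more members.
 */
#include <stdio.h>

struct trace_event {
	int type;	/* number assigned when the event is registered */
};

struct ftrace_event_call {
	const char *name;
	struct trace_event event;	/* embedded; makes a separate "int id" redundant */
};

/* Hypothetical helper: read the event number through the embedded event. */
static int event_id(const struct ftrace_event_call *call)
{
	return call->event.type;	/* previously: call->id */
}

int main(void)
{
	/* Made-up example values, for illustration only. */
	struct ftrace_event_call call = {
		.name = "sched_switch",
		.event = { .type = 42 },
	};

	printf("ID: %d\n", event_id(&call));	/* mirrors what the per-event "id" file prints */
	return 0;
}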
@@ -149,14 +149,13 @@ struct ftrace_event_call {
 	char *name;
 	struct dentry *dir;
 	struct trace_event event;
-	int enabled;
-	int id;
 	const char *print_fmt;
-	int filter_active;
 	struct event_filter *filter;
 	void *mod;
 	void *data;
+	int enabled;
+	int filter_active;
 	int perf_refcount;
 };
......
@@ -150,7 +150,7 @@
  *
  *	entry = iter->ent;
  *
- *	if (entry->type != event_<call>.id) {
+ *	if (entry->type != event_<call>->event.type) {
  *		WARN_ON_ONCE(1);
  *		return TRACE_TYPE_UNHANDLED;
  *	}
@@ -221,7 +221,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
 	\
 	entry = iter->ent; \
 	\
-	if (entry->type != event->id) { \
+	if (entry->type != event->event.type) { \
 		WARN_ON_ONCE(1); \
 		return TRACE_TYPE_UNHANDLED; \
 	} \
@@ -257,7 +257,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
 	\
 	entry = iter->ent; \
 	\
-	if (entry->type != event_##call.id) { \
+	if (entry->type != event_##call.event.type) { \
 		WARN_ON_ONCE(1); \
 		return TRACE_TYPE_UNHANDLED; \
 	} \
@@ -409,7 +409,7 @@ static inline notrace int ftrace_get_offsets_##call( \
  *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
  *
  *	event = trace_current_buffer_lock_reserve(&buffer,
- *				  event_<call>.id,
+ *				  event_<call>->event.type,
  *				  sizeof(*entry) + __data_size,
  *				  irq_flags, pc);
  *	if (!event)
@@ -510,7 +510,7 @@ ftrace_raw_event_##call(void *__data, proto) \
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
 	\
 	event = trace_current_buffer_lock_reserve(&buffer, \
-				 event_call->id, \
+				 event_call->event.type, \
 				 sizeof(*entry) + __data_size, \
 				 irq_flags, pc); \
 	if (!event) \
@@ -711,7 +711,7 @@ perf_trace_##call(void *__data, proto) \
 		      "profile buffer not large enough")) \
 		return; \
 	entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \
-		__entry_size, event_call->id, &rctx, &irq_flags); \
+		__entry_size, event_call->event.type, &rctx, &irq_flags); \
 	if (!entry) \
 		return; \
 	tstruct \
......
@@ -80,7 +80,7 @@ int perf_trace_enable(int event_id)
 	mutex_lock(&event_mutex);
 	list_for_each_entry(event, &ftrace_events, list) {
-		if (event->id == event_id &&
+		if (event->event.type == event_id &&
 		    event->class && event->class->perf_probe &&
 		    try_module_get(event->mod)) {
 			ret = perf_trace_event_enable(event);
@@ -128,7 +128,7 @@ void perf_trace_disable(int event_id)
 	mutex_lock(&event_mutex);
 	list_for_each_entry(event, &ftrace_events, list) {
-		if (event->id == event_id) {
+		if (event->event.type == event_id) {
 			perf_trace_event_disable(event);
 			module_put(event->mod);
 			break;
......
@@ -125,7 +125,6 @@ int trace_event_raw_init(struct ftrace_event_call *call)
 	id = register_ftrace_event(&call->event);
 	if (!id)
 		return -ENODEV;
-	call->id = id;
 	return 0;
 }
@@ -567,7 +566,7 @@ event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
 	trace_seq_init(s);
 	trace_seq_printf(s, "name: %s\n", call->name);
-	trace_seq_printf(s, "ID: %d\n", call->id);
+	trace_seq_printf(s, "ID: %d\n", call->event.type);
 	trace_seq_printf(s, "format:\n");
 	head = trace_get_fields(call);
@@ -641,7 +640,7 @@ event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
 		return -ENOMEM;
 	trace_seq_init(s);
-	trace_seq_printf(s, "%d\n", call->id);
+	trace_seq_printf(s, "%d\n", call->event.type);
 	r = simple_read_from_buffer(ubuf, cnt, ppos,
 				    s->buffer, s->len);
@@ -969,7 +968,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
 				  enable);
 #ifdef CONFIG_PERF_EVENTS
-	if (call->id && (call->class->perf_probe || call->class->reg))
+	if (call->event.type && (call->class->perf_probe || call->class->reg))
 		trace_create_file("id", 0444, call->dir, call,
 				  id);
 #endif
......
@@ -1395,7 +1395,7 @@ int ftrace_profile_set_filter(struct perf_event *event, int event_id,
 	mutex_lock(&event_mutex);
 	list_for_each_entry(call, &ftrace_events, list) {
-		if (call->id == event_id)
+		if (call->event.type == event_id)
 			break;
 	}
......
@@ -153,7 +153,7 @@ static int ftrace_raw_init_event(struct ftrace_event_call *call)
 #define F_printk(fmt, args...) #fmt ", " __stringify(args)
 #undef FTRACE_ENTRY
-#define FTRACE_ENTRY(call, struct_name, type, tstruct, print) \
+#define FTRACE_ENTRY(call, struct_name, etype, tstruct, print) \
 	\
 struct ftrace_event_class event_class_ftrace_##call = { \
 	.system = __stringify(TRACE_SYSTEM), \
@@ -165,7 +165,7 @@ struct ftrace_event_call __used \
 __attribute__((__aligned__(4))) \
 __attribute__((section("_ftrace_events"))) event_##call = { \
 	.name = #call, \
-	.id = type, \
+	.event.type = etype, \
 	.class = &event_class_ftrace_##call, \
 	.print_fmt = print, \
 }; \
......
@@ -960,8 +960,8 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
 	size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
-	event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
-						  irq_flags, pc);
+	event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
+						  size, irq_flags, pc);
 	if (!event)
 		return;
@@ -992,8 +992,8 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
 	size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
-	event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
-						  irq_flags, pc);
+	event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
+						  size, irq_flags, pc);
 	if (!event)
 		return;
@@ -1228,7 +1228,8 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
 		     "profile buffer not large enough"))
 		return;
-	entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
+	entry = perf_trace_buf_prepare(size, call->event.type,
+				       &rctx, &irq_flags);
 	if (!entry)
 		return;
@@ -1258,7 +1259,8 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
 		     "profile buffer not large enough"))
 		return;
-	entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
+	entry = perf_trace_buf_prepare(size, call->event.type,
+				       &rctx, &irq_flags);
 	if (!entry)
 		return;
@@ -1375,8 +1377,8 @@ static int register_probe_event(struct trace_probe *tp)
 	}
 	if (set_print_fmt(tp) < 0)
 		return -ENOMEM;
-	call->id = register_ftrace_event(&call->event);
-	if (!call->id) {
+	ret = register_ftrace_event(&call->event);
+	if (!ret) {
 		kfree(call->print_fmt);
 		return -ENODEV;
 	}
......
@@ -117,7 +117,7 @@ print_syscall_enter(struct trace_iterator *iter, int flags,
 	if (!entry)
 		goto end;
-	if (entry->enter_event->id != ent->type) {
+	if (entry->enter_event->event.type != ent->type) {
 		WARN_ON_ONCE(1);
 		goto end;
 	}
@@ -173,7 +173,7 @@ print_syscall_exit(struct trace_iterator *iter, int flags,
 		return TRACE_TYPE_HANDLED;
 	}
-	if (entry->exit_event->id != ent->type) {
+	if (entry->exit_event->event.type != ent->type) {
 		WARN_ON_ONCE(1);
 		return TRACE_TYPE_UNHANDLED;
 	}
@@ -315,7 +315,7 @@ void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
 	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;
 	event = trace_current_buffer_lock_reserve(&buffer,
-			sys_data->enter_event->id, size, 0, 0);
+			sys_data->enter_event->event.type, size, 0, 0);
 	if (!event)
 		return;
@@ -347,7 +347,7 @@ void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
 		return;
 	event = trace_current_buffer_lock_reserve(&buffer,
-			sys_data->exit_event->id, sizeof(*entry), 0, 0);
+			sys_data->exit_event->event.type, sizeof(*entry), 0, 0);
 	if (!event)
 		return;
@@ -511,7 +511,8 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
 		return;
 	rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
-				sys_data->enter_event->id, &rctx, &flags);
+				sys_data->enter_event->event.type,
+				&rctx, &flags);
 	if (!rec)
 		return;
@@ -586,7 +587,8 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
 		return;
 	rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
-				sys_data->exit_event->id, &rctx, &flags);
+				sys_data->exit_event->event.type,
+				&rctx, &flags);
 	if (!rec)
 		return;
......