diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index f83e17a40e8b848185b649836274b57acdc332c1..99d0fbcbaf79eff04c8ab56531e02aa46e953977 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -90,6 +90,8 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
  *            not set this, then the ftrace infrastructure will add recursion
  *            protection for the caller.
  * STUB   - The ftrace_ops is just a place holder.
+ * INITIALIZED - The ftrace_ops has already been initialized (set the first
+ *            time register_ftrace_function() is called, which initializes the ops)
  */
 enum {
 	FTRACE_OPS_FL_ENABLED			= 1 << 0,
@@ -100,6 +102,7 @@ enum {
 	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= 1 << 5,
 	FTRACE_OPS_FL_RECURSION_SAFE		= 1 << 6,
 	FTRACE_OPS_FL_STUB			= 1 << 7,
+	FTRACE_OPS_FL_INITIALIZED		= 1 << 8,
 };
 
 struct ftrace_ops {
@@ -110,6 +113,7 @@ struct ftrace_ops {
 #ifdef CONFIG_DYNAMIC_FTRACE
 	struct ftrace_hash		*notrace_hash;
 	struct ftrace_hash		*filter_hash;
+	struct mutex			regex_lock;
 #endif
 };
 
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 34e00fb49becf506865c18ffafefe23c413ea5c4..4372658c73ae5eddd3dd6570d4dfac8c5d8825c8 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -293,6 +293,7 @@ struct ftrace_event_file {
 	 * caching and such. Which is mostly OK ;-)
 	 */
 	unsigned long		flags;
+	atomic_t		sm_ref;	/* soft-mode reference counter */
 };
 
 #define __TRACE_EVENT_FLAGS(name, value)			\
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 5e9efd4b83a47fda4e70825078baabbc090a9a72..015f85aaca08f5f5d6eb55af1f1aab46670bb03b 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -71,6 +71,7 @@ config TRACE_CLOCK
 config RING_BUFFER
 	bool
 	select TRACE_CLOCK
+	select IRQ_WORK
 
 config FTRACE_NMI_ENTER
 	bool
@@ -107,7 +108,6 @@ config TRACING
 	select BINARY_PRINTF
 	select EVENT_TRACING
 	select TRACE_CLOCK
-	select IRQ_WORK
 
 config GENERIC_TRACER
 	bool
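A note on the initialization scheme the header changes above set up: ftrace_ops that are statically declared inside ftrace.c get their regex_lock from a compile-time INIT_REGEX_LOCK() initializer plus the INITIALIZED flag, while ops handed in from elsewhere are initialized lazily on first use by ftrace_ops_init(), both added in the ftrace.c hunks below. A minimal userspace analogue of that lazy, idempotent pattern, with pthread standing in for the kernel mutex (all names here are illustrative, not kernel API):

#include <pthread.h>

#define OPS_FL_INITIALIZED	(1UL << 8)

struct my_ops {
	unsigned long	flags;
	pthread_mutex_t	regex_lock;
};

/* Compile-time path, mirroring what INIT_REGEX_LOCK() does for static ops. */
static struct my_ops static_ops = {
	.flags		= OPS_FL_INITIALIZED,
	.regex_lock	= PTHREAD_MUTEX_INITIALIZER,
};

/* First-use path, mirroring ftrace_ops_init(); the flag check makes the
 * helper safe to call from every entry point that might see the ops first. */
static void my_ops_init(struct my_ops *ops)
{
	if (!(ops->flags & OPS_FL_INITIALIZED)) {
		pthread_mutex_init(&ops->regex_lock, NULL);
		ops->flags |= OPS_FL_INITIALIZED;
	}
}

Because the flag test is cheap and idempotent, the kernel patch can sprinkle ftrace_ops_init() across every user-reachable entry point without tracking which one runs first.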
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 8a5c017bb50c141bfca4d8206ac2a1805d224185..b549b0f5b9771624159d8dbbf1b6a103127a842f 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -64,6 +64,13 @@
 
 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+#define INIT_REGEX_LOCK(opsname)	\
+	.regex_lock	= __MUTEX_INITIALIZER(opsname.regex_lock),
+#else
+#define INIT_REGEX_LOCK(opsname)
+#endif
+
 static struct ftrace_ops ftrace_list_end __read_mostly = {
 	.func		= ftrace_stub,
 	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
@@ -131,6 +138,16 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
 	while (likely(op = rcu_dereference_raw((op)->next)) &&	\
 	       unlikely((op) != &ftrace_list_end))
 
+static inline void ftrace_ops_init(struct ftrace_ops *ops)
+{
+#ifdef CONFIG_DYNAMIC_FTRACE
+	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
+		mutex_init(&ops->regex_lock);
+		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
+	}
+#endif
+}
+
 /**
  * ftrace_nr_registered_ops - return number of ops registered
  *
@@ -907,7 +924,8 @@ static void unregister_ftrace_profiler(void)
 #else
 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
 	.func		= function_profile_call,
-	.flags		= FTRACE_OPS_FL_RECURSION_SAFE,
+	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+	INIT_REGEX_LOCK(ftrace_profile_ops)
 };
 
 static int register_ftrace_profiler(void)
@@ -1103,11 +1121,10 @@ static struct ftrace_ops global_ops = {
 	.func			= ftrace_stub,
 	.notrace_hash		= EMPTY_HASH,
 	.filter_hash		= EMPTY_HASH,
-	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
+	.flags			= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+	INIT_REGEX_LOCK(global_ops)
 };
 
-static DEFINE_MUTEX(ftrace_regex_lock);
-
 struct ftrace_page {
 	struct ftrace_page	*next;
 	struct dyn_ftrace	*records;
@@ -1247,6 +1264,7 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
 
 void ftrace_free_filter(struct ftrace_ops *ops)
 {
+	ftrace_ops_init(ops);
 	free_ftrace_hash(ops->filter_hash);
 	free_ftrace_hash(ops->notrace_hash);
 }
@@ -2441,7 +2459,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 		     !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
 
 		    ((iter->flags & FTRACE_ITER_ENABLED) &&
-		     !(rec->flags & ~FTRACE_FL_MASK))) {
+		     !(rec->flags & FTRACE_FL_ENABLED))) {
 
 			rec = NULL;
 			goto retry;
@@ -2624,6 +2642,8 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
 	struct ftrace_hash *hash;
 	int ret = 0;
 
+	ftrace_ops_init(ops);
+
 	if (unlikely(ftrace_disabled))
 		return -ENODEV;
 
@@ -2636,28 +2656,26 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
 		return -ENOMEM;
 	}
 
+	iter->ops = ops;
+	iter->flags = flag;
+
+	mutex_lock(&ops->regex_lock);
+
 	if (flag & FTRACE_ITER_NOTRACE)
 		hash = ops->notrace_hash;
 	else
 		hash = ops->filter_hash;
 
-	iter->ops = ops;
-	iter->flags = flag;
-
 	if (file->f_mode & FMODE_WRITE) {
-		mutex_lock(&ftrace_lock);
 		iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
-		mutex_unlock(&ftrace_lock);
-
 		if (!iter->hash) {
 			trace_parser_put(&iter->parser);
 			kfree(iter);
-			return -ENOMEM;
+			ret = -ENOMEM;
+			goto out_unlock;
 		}
 	}
 
-	mutex_lock(&ftrace_regex_lock);
-
 	if ((file->f_mode & FMODE_WRITE) &&
 	    (file->f_flags & O_TRUNC))
 		ftrace_filter_reset(iter->hash);
@@ -2677,7 +2695,9 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
 		}
 	} else
 		file->private_data = iter;
-	mutex_unlock(&ftrace_regex_lock);
+
+ out_unlock:
+	mutex_unlock(&ops->regex_lock);
 
 	return ret;
 }
@@ -2910,6 +2930,8 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
 
 static struct ftrace_ops trace_probe_ops __read_mostly = {
 	.func		= function_trace_probe_call,
+	.flags		= FTRACE_OPS_FL_INITIALIZED,
+	INIT_REGEX_LOCK(trace_probe_ops)
 };
 
 static int ftrace_probe_registered;
@@ -2919,8 +2941,12 @@ static void __enable_ftrace_function_probe(void)
 	int ret;
 	int i;
 
-	if (ftrace_probe_registered)
+	if (ftrace_probe_registered) {
+		/* still need to update the function call sites */
+		if (ftrace_enabled)
+			ftrace_run_update_code(FTRACE_UPDATE_CALLS);
 		return;
+	}
 
 	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
 		struct hlist_head *hhd = &ftrace_func_hash[i];
@@ -2990,19 +3016,21 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 	if (WARN_ON(not))
 		return -EINVAL;
 
-	mutex_lock(&ftrace_lock);
+	mutex_lock(&trace_probe_ops.regex_lock);
 
 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
 	if (!hash) {
 		count = -ENOMEM;
-		goto out_unlock;
+		goto out;
 	}
 
 	if (unlikely(ftrace_disabled)) {
 		count = -ENODEV;
-		goto out_unlock;
+		goto out;
 	}
 
+	mutex_lock(&ftrace_lock);
+
 	do_for_each_ftrace_rec(pg, rec) {
 
 		if (!ftrace_match_record(rec, NULL, search, len, type))
@@ -3056,6 +3084,8 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 
  out_unlock:
 	mutex_unlock(&ftrace_lock);
+ out:
+	mutex_unlock(&trace_probe_ops.regex_lock);
 	free_ftrace_hash(hash);
 
 	return count;
@@ -3095,7 +3125,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 		return;
 	}
 
-	mutex_lock(&ftrace_lock);
+	mutex_lock(&trace_probe_ops.regex_lock);
 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
 	if (!hash)
@@ -3133,6 +3163,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 			list_add(&entry->free_list, &free_list);
 		}
 	}
+	mutex_lock(&ftrace_lock);
 	__disable_ftrace_function_probe();
 	/*
 	 * Remove after the disable is called. Otherwise, if the last
@@ -3144,9 +3175,10 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 		list_del(&entry->free_list);
 		ftrace_free_entry(entry);
 	}
+	mutex_unlock(&ftrace_lock);
 
  out_unlock:
-	mutex_unlock(&ftrace_lock);
+	mutex_unlock(&trace_probe_ops.regex_lock);
 	free_ftrace_hash(hash);
 }
@@ -3256,18 +3288,17 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
 	if (!cnt)
 		return 0;
 
-	mutex_lock(&ftrace_regex_lock);
-
-	ret = -ENODEV;
-	if (unlikely(ftrace_disabled))
-		goto out_unlock;
-
 	if (file->f_mode & FMODE_READ) {
 		struct seq_file *m = file->private_data;
 		iter = m->private;
 	} else
 		iter = file->private_data;
 
+	if (unlikely(ftrace_disabled))
+		return -ENODEV;
+
+	/* iter->hash is a local copy, so we don't need regex_lock */
+
 	parser = &iter->parser;
 	read = trace_get_user(parser, ubuf, cnt, ppos);
 
@@ -3276,14 +3307,12 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
 		ret = ftrace_process_regex(iter->hash, parser->buffer,
 					   parser->idx, enable);
 		trace_parser_clear(parser);
-		if (ret)
-			goto out_unlock;
+		if (ret < 0)
+			goto out;
 	}
 
 	ret = read;
-out_unlock:
-	mutex_unlock(&ftrace_regex_lock);
-
+ out:
 	return ret;
 }
@@ -3335,16 +3364,19 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
 	if (unlikely(ftrace_disabled))
 		return -ENODEV;
 
+	mutex_lock(&ops->regex_lock);
+
 	if (enable)
 		orig_hash = &ops->filter_hash;
 	else
 		orig_hash = &ops->notrace_hash;
 
 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
-	if (!hash)
-		return -ENOMEM;
+	if (!hash) {
+		ret = -ENOMEM;
+		goto out_regex_unlock;
+	}
 
-	mutex_lock(&ftrace_regex_lock);
 	if (reset)
 		ftrace_filter_reset(hash);
 	if (buf && !ftrace_match_records(hash, buf, len)) {
@@ -3366,7 +3398,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
 	mutex_unlock(&ftrace_lock);
 
  out_regex_unlock:
-	mutex_unlock(&ftrace_regex_lock);
+	mutex_unlock(&ops->regex_lock);
 
 	free_ftrace_hash(hash);
 	return ret;
@@ -3392,6 +3424,7 @@ ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
 			 int remove, int reset)
 {
+	ftrace_ops_init(ops);
 	return ftrace_set_addr(ops, ip, remove, reset, 1);
 }
 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
@@ -3416,6 +3449,7 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
 		       int len, int reset)
 {
+	ftrace_ops_init(ops);
 	return ftrace_set_regex(ops, buf, len, reset, 1);
 }
 EXPORT_SYMBOL_GPL(ftrace_set_filter);
@@ -3434,6 +3468,7 @@ EXPORT_SYMBOL_GPL(ftrace_set_filter);
 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
 			int len, int reset)
 {
+	ftrace_ops_init(ops);
 	return ftrace_set_regex(ops, buf, len, reset, 0);
 }
 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
@@ -3524,6 +3559,8 @@ ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
 {
 	char *func;
 
+	ftrace_ops_init(ops);
+
 	while (buf) {
 		func = strsep(&buf, ",");
 		ftrace_set_regex(ops, func, strlen(func), 0, enable);
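The reworked ftrace_set_hash() above settles into a copy-modify-commit shape: the per-ops regex_lock is taken up front and covers building a private copy of the hash, while the global ftrace_lock is held only around the final commit. A userspace sketch of that shape, assuming pthread, with a plain string standing in for the hash (names illustrative, not kernel API):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct filter {
	pthread_mutex_t	regex_lock;	/* per-object, like ops->regex_lock */
	char		*hash;		/* stands in for ops->filter_hash */
};

static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;

static int filter_update(struct filter *f, const char *buf)
{
	char *copy;

	pthread_mutex_lock(&f->regex_lock);
	copy = strdup(buf);			/* build a private copy... */
	if (!copy) {
		pthread_mutex_unlock(&f->regex_lock);
		return -1;
	}
	pthread_mutex_lock(&global_lock);	/* ...commit under the global lock */
	free(f->hash);
	f->hash = copy;
	pthread_mutex_unlock(&global_lock);
	pthread_mutex_unlock(&f->regex_lock);
	return 0;
}

Note the lock order (regex_lock outer, ftrace_lock inner) matches the probe hunks earlier in this file, which is what keeps the two paths consistent with each other.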
@@ -3551,10 +3588,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 	int filter_hash;
 	int ret;
 
-	mutex_lock(&ftrace_regex_lock);
 	if (file->f_mode & FMODE_READ) {
 		iter = m->private;
-
 		seq_release(inode, file);
 	} else
 		iter = file->private_data;
@@ -3567,6 +3602,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 
 	trace_parser_put(parser);
 
+	mutex_lock(&iter->ops->regex_lock);
+
 	if (file->f_mode & FMODE_WRITE) {
 		filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
 
@@ -3584,10 +3621,11 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 
 		mutex_unlock(&ftrace_lock);
 	}
+
+	mutex_unlock(&iter->ops->regex_lock);
 	free_ftrace_hash(iter->hash);
 	kfree(iter);
 
-	mutex_unlock(&ftrace_regex_lock);
 	return 0;
 }
 
@@ -4126,7 +4164,8 @@ void __init ftrace_init(void)
 
 static struct ftrace_ops global_ops = {
 	.func			= ftrace_stub,
-	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
+	.flags			= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+	INIT_REGEX_LOCK(global_ops)
 };
 
 static int __init ftrace_nodyn_init(void)
@@ -4180,8 +4219,9 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
 }
 
 static struct ftrace_ops control_ops = {
-	.func = ftrace_ops_control_func,
-	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
+	.func	= ftrace_ops_control_func,
+	.flags	= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+	INIT_REGEX_LOCK(control_ops)
 };
 
 static inline void
@@ -4539,6 +4579,8 @@ int register_ftrace_function(struct ftrace_ops *ops)
 {
 	int ret = -1;
 
+	ftrace_ops_init(ops);
+
 	mutex_lock(&ftrace_lock);
 
 	ret = __register_ftrace_function(ops);
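For API consumers, the practical effect of the ftrace.c changes above is that a zero-initialized ftrace_ops needs no explicit setup before the filter calls: ftrace_set_filter(), ftrace_set_notrace(), ftrace_set_filter_ip() and register_ftrace_function() all run ftrace_ops_init() first. A hedged module-side sketch (my_callback, my_ops, my_init/my_exit and the traced symbol are illustrative; the callback signature follows the ftrace_func_t typedef shown in the ftrace.h hunk):

#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/string.h>

static void my_callback(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *op, struct pt_regs *regs)
{
	/* tracing work goes here; keep it light, this runs on every hit */
}

/* No INIT_REGEX_LOCK() is available outside ftrace.c; the first API
 * call below initializes regex_lock via ftrace_ops_init() instead. */
static struct ftrace_ops my_ops = {
	.func	= my_callback,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
};

static char func_name[] = "do_fork";	/* illustrative target */

static int __init my_init(void)
{
	/* Safe before register_ftrace_function() thanks to the lazy init. */
	ftrace_set_filter(&my_ops, func_name, strlen(func_name), 0);
	return register_ftrace_function(&my_ops);
}

static void __exit my_exit(void)
{
	unregister_ftrace_function(&my_ops);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");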
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 53582e982e51673e7294080097e9acc7b2bdf8f2..7a0cf68027ccf62118d4eb1da83ec00af3a0b49b 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -251,7 +251,8 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
 	switch (enable) {
 	case 0:
 		/*
-		 * When soft_disable is set and enable is cleared, we want
+		 * When soft_disable is set and enable is cleared, the sm_ref
+		 * reference counter is decremented. If it reaches 0, we want
 		 * to clear the SOFT_DISABLED flag but leave the event in the
 		 * state that it was. That is, if the event was enabled and
 		 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
@@ -263,6 +264,8 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
 		 * "soft enable"s (clearing the SOFT_DISABLED bit) wont work.
 		 */
 		if (soft_disable) {
+			if (atomic_dec_return(&file->sm_ref) > 0)
+				break;
 			disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
 			clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
 		} else
@@ -291,8 +294,11 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
 		 */
 		if (!soft_disable)
 			clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
-		else
+		else {
+			if (atomic_inc_return(&file->sm_ref) > 1)
+				break;
 			set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
+		}
 
 		if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {
@@ -623,6 +629,8 @@ event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
 	if (file->flags & FTRACE_EVENT_FL_ENABLED) {
 		if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED)
 			buf = "0*\n";
+		else if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
+			buf = "1*\n";
 		else
 			buf = "1\n";
 	} else
@@ -1521,6 +1529,24 @@ __register_event(struct ftrace_event_call *call, struct module *mod)
 	return 0;
 }
 
+static struct ftrace_event_file *
+trace_create_new_event(struct ftrace_event_call *call,
+		       struct trace_array *tr)
+{
+	struct ftrace_event_file *file;
+
+	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
+	if (!file)
+		return NULL;
+
+	file->event_call = call;
+	file->tr = tr;
+	atomic_set(&file->sm_ref, 0);
+	list_add(&file->list, &tr->events);
+
+	return file;
+}
+
 /* Add an event to a trace directory */
 static int
 __trace_add_new_event(struct ftrace_event_call *call,
@@ -1532,14 +1558,10 @@ __trace_add_new_event(struct ftrace_event_call *call,
 {
 	struct ftrace_event_file *file;
 
-	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
+	file = trace_create_new_event(call, tr);
 	if (!file)
 		return -ENOMEM;
 
-	file->event_call = call;
-	file->tr = tr;
-	list_add(&file->list, &tr->events);
-
 	return event_create_dir(tr->event_dir, file, id, enable, filter, format);
 }
 
@@ -1554,14 +1576,10 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
 {
 	struct ftrace_event_file *file;
 
-	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
+	file = trace_create_new_event(call, tr);
 	if (!file)
 		return -ENOMEM;
 
-	file->event_call = call;
-	file->tr = tr;
-	list_add(&file->list, &tr->events);
-
 	return 0;
 }
 
@@ -2061,8 +2079,18 @@ event_enable_func(struct ftrace_hash *hash,
 	if (ret < 0)
 		goto out_put;
 	ret = register_ftrace_function_probe(glob, ops, data);
-	if (!ret)
+	/*
+	 * The above returns on success the # of functions enabled,
+	 * but if it didn't find any functions it returns zero.
+	 * Consider no functions a failure too.
+	 */
+	if (!ret) {
+		ret = -ENOENT;
+		goto out_disable;
+	} else if (ret < 0)
 		goto out_disable;
+	/* Just return zero, not the number of enabled functions */
+	ret = 0;
  out:
 	mutex_unlock(&event_mutex);
 	return ret;
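The sm_ref counter introduced above turns soft mode into a reference-counted state: only the 0 to 1 transition sets SOFT_MODE and only the 1 to 0 transition tears it down, so stacked soft-enables from several triggers no longer cancel one another. A compilable userspace analogue of just that transition logic (C11 atomics, illustrative names):

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

struct event_file {
	atomic_int	sm_ref;		/* soft-mode reference counter */
	bool		soft_mode;
};

static void soft_enable(struct event_file *f)
{
	/* mirrors: if (atomic_inc_return(&file->sm_ref) > 1) break; */
	if (atomic_fetch_add(&f->sm_ref, 1) + 1 > 1)
		return;
	f->soft_mode = true;	/* only the first enabler flips the mode */
}

static void soft_disable(struct event_file *f)
{
	/* mirrors: if (atomic_dec_return(&file->sm_ref) > 0) break; */
	if (atomic_fetch_sub(&f->sm_ref, 1) - 1 > 0)
		return;
	f->soft_mode = false;	/* only the last disabler clears it */
}

int main(void)
{
	struct event_file f = { .sm_ref = 0, .soft_mode = false };

	soft_enable(&f);
	soft_enable(&f);	/* second enabler: no state change */
	soft_disable(&f);	/* one holder remains: mode stays on */
	assert(f.soft_mode);
	soft_disable(&f);	/* last holder gone: mode off */
	assert(!f.soft_mode);
	return 0;
}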
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 1865d5f765387f55600d063a633e6a7e79bcae3c..636d45fe69b37a80eefafe6b3aa6d46220f132dd 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -27,7 +27,6 @@
 /**
  * Kprobe event core functions
  */
-
 struct trace_probe {
 	struct list_head	list;
 	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
@@ -36,6 +35,7 @@ struct trace_probe {
 	const char		*symbol;	/* symbol name */
 	struct ftrace_event_class	class;
 	struct ftrace_event_call	call;
+	struct ftrace_event_file	**files;
 	ssize_t			size;		/* trace entry size */
 	unsigned int		nr_args;
 	struct probe_arg	args[];
@@ -46,7 +46,7 @@ struct trace_probe {
 	(sizeof(struct probe_arg) * (n)))
 
 
-static __kprobes int trace_probe_is_return(struct trace_probe *tp)
+static __kprobes bool trace_probe_is_return(struct trace_probe *tp)
 {
 	return tp->rp.handler != NULL;
 }
@@ -183,12 +183,57 @@ static struct trace_probe *find_trace_probe(const char *event,
 	return NULL;
 }
 
-/* Enable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */
-static int enable_trace_probe(struct trace_probe *tp, int flag)
+static int trace_probe_nr_files(struct trace_probe *tp)
+{
+	struct ftrace_event_file **file = tp->files;
+	int ret = 0;
+
+	if (file)
+		while (*(file++))
+			ret++;
+
+	return ret;
+}
+
+static DEFINE_MUTEX(probe_enable_lock);
+
+/*
+ * Enable trace_probe:
+ * if file is NULL, enable the "perf" handler; otherwise enable the "trace" handler.
+ */
+static int
+enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 {
 	int ret = 0;
 
-	tp->flags |= flag;
+	mutex_lock(&probe_enable_lock);
+
+	if (file) {
+		struct ftrace_event_file **new, **old = tp->files;
+		int n = trace_probe_nr_files(tp);
+
+		/* 1 is for new one and 1 is for stopper */
+		new = kzalloc((n + 2) * sizeof(struct ftrace_event_file *),
+			      GFP_KERNEL);
+		if (!new) {
+			ret = -ENOMEM;
+			goto out_unlock;
+		}
+		memcpy(new, old, n * sizeof(struct ftrace_event_file *));
+		new[n] = file;
+		/* The last one keeps a NULL */
+
+		rcu_assign_pointer(tp->files, new);
+		tp->flags |= TP_FLAG_TRACE;
+
+		if (old) {
+			/* Make sure the probe is done with old files */
+			synchronize_sched();
+			kfree(old);
+		}
+	} else
+		tp->flags |= TP_FLAG_PROFILE;
+
 	if (trace_probe_is_enabled(tp) && trace_probe_is_registered(tp) &&
 	    !trace_probe_has_gone(tp)) {
 		if (trace_probe_is_return(tp))
@@ -197,19 +242,83 @@ static int enable_trace_probe(struct trace_probe *tp, int flag)
 			ret = enable_kprobe(&tp->rp.kp);
 	}
 
+ out_unlock:
+	mutex_unlock(&probe_enable_lock);
+
 	return ret;
 }
 
-/* Disable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */
-static void disable_trace_probe(struct trace_probe *tp, int flag)
+static int
+trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file)
+{
+	int i;
+
+	if (tp->files) {
+		for (i = 0; tp->files[i]; i++)
+			if (tp->files[i] == file)
+				return i;
+	}
+
+	return -1;
+}
+
+/*
+ * Disable trace_probe:
+ * if file is NULL, disable the "perf" handler; otherwise disable the "trace" handler.
+ */
+static int
+disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 {
-	tp->flags &= ~flag;
+	int ret = 0;
+
+	mutex_lock(&probe_enable_lock);
+
+	if (file) {
+		struct ftrace_event_file **new, **old = tp->files;
+		int n = trace_probe_nr_files(tp);
+		int i, j;
+
+		if (n == 0 || trace_probe_file_index(tp, file) < 0) {
+			ret = -EINVAL;
+			goto out_unlock;
+		}
+
+		if (n == 1) {	/* Remove the last file */
+			tp->flags &= ~TP_FLAG_TRACE;
+			new = NULL;
+		} else {
+			new = kzalloc(n * sizeof(struct ftrace_event_file *),
+				      GFP_KERNEL);
+			if (!new) {
+				ret = -ENOMEM;
+				goto out_unlock;
+			}
+
+			/* This copy & check loop copies the NULL stopper too */
+			for (i = 0, j = 0; j < n && i < n + 1; i++)
+				if (old[i] != file)
+					new[j++] = old[i];
+		}
+
+		rcu_assign_pointer(tp->files, new);
+
+		/* Make sure the probe is done with old files */
+		synchronize_sched();
+		kfree(old);
+	} else
+		tp->flags &= ~TP_FLAG_PROFILE;
+
 	if (!trace_probe_is_enabled(tp) && trace_probe_is_registered(tp)) {
 		if (trace_probe_is_return(tp))
 			disable_kretprobe(&tp->rp);
 		else
 			disable_kprobe(&tp->rp.kp);
 	}
+
+ out_unlock:
+	mutex_unlock(&probe_enable_lock);
+
+	return ret;
 }
 
 /* Internal register function - just handle k*probes and flags */
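The per-probe file lists built by enable/disable_trace_probe above are NULL-terminated ("stopper") arrays that are never edited in place: a new array is assembled, published with rcu_assign_pointer(), and the old one is freed only after synchronize_sched() guarantees no probe handler still walks it. A userspace sketch of just the array bookkeeping, with the RCU publish and grace-period steps left as comments (illustrative code, not kernel API):

#include <stdlib.h>
#include <string.h>

/* Add an entry: n current entries, plus one new, plus the stopper. */
static void **stopper_array_add(void **old, int n, void *item)
{
	void **new = calloc(n + 2, sizeof(*new));

	if (!new)
		return NULL;
	memcpy(new, old, n * sizeof(*new));
	new[n] = item;			/* new[n + 1] stays NULL */
	/* kernel: rcu_assign_pointer(tp->files, new);
	 *         synchronize_sched(); kfree(old); */
	return new;
}

/* Remove an entry: the copy & check loop carries the stopper across,
 * since old[] holds n entries plus the NULL at old[n]. */
static void **stopper_array_del(void **old, int n, void *item)
{
	void **new = calloc(n, sizeof(*new));
	int i, j;

	if (!new)
		return NULL;
	for (i = 0, j = 0; j < n && i < n + 1; i++)
		if (old[i] != item)
			new[j++] = old[i];
	return new;
}

Terminating with a NULL sentinel instead of storing a count lets the probe handlers below iterate the list with a single load per step, with no separate length field to keep in sync.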
@@ -723,9 +832,10 @@ static __kprobes void store_trace_args(int ent_size, struct trace_probe *tp,
 }
 
 /* Kprobe handler */
-static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
+static __kprobes void
+__kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs,
+		    struct ftrace_event_file *ftrace_file)
 {
-	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
 	struct kprobe_trace_entry_head *entry;
 	struct ring_buffer_event *event;
 	struct ring_buffer *buffer;
@@ -733,7 +843,10 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
 	unsigned long irq_flags;
 	struct ftrace_event_call *call = &tp->call;
 
-	tp->nhit++;
+	WARN_ON(call != ftrace_file->event_call);
+
+	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
+		return;
 
 	local_save_flags(irq_flags);
 	pc = preempt_count();
@@ -741,13 +854,14 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
 	dsize = __get_data_size(tp, regs);
 	size = sizeof(*entry) + tp->size + dsize;
 
-	event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
-						  size, irq_flags, pc);
+	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
+						call->event.type,
+						size, irq_flags, pc);
 	if (!event)
 		return;
 
 	entry = ring_buffer_event_data(event);
-	entry->ip = (unsigned long)kp->addr;
+	entry->ip = (unsigned long)tp->rp.kp.addr;
 	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
 
 	if (!filter_current_check_discard(buffer, call, entry, event))
@@ -755,11 +869,24 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
 						irq_flags, pc, regs);
 }
 
+static __kprobes void
+kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs)
+{
+	struct ftrace_event_file **file = tp->files;
+
+	/* Note: preempt is already disabled around the kprobe handler */
+	while (*file) {
+		__kprobe_trace_func(tp, regs, *file);
+		file++;
+	}
+}
+
 /* Kretprobe handler */
-static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
-					   struct pt_regs *regs)
+static __kprobes void
+__kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
+		       struct pt_regs *regs,
+		       struct ftrace_event_file *ftrace_file)
 {
-	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
 	struct kretprobe_trace_entry_head *entry;
 	struct ring_buffer_event *event;
 	struct ring_buffer *buffer;
@@ -767,14 +894,20 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
 	unsigned long irq_flags;
 	struct ftrace_event_call *call = &tp->call;
 
+	WARN_ON(call != ftrace_file->event_call);
+
+	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
+		return;
+
 	local_save_flags(irq_flags);
 	pc = preempt_count();
 
 	dsize = __get_data_size(tp, regs);
 	size = sizeof(*entry) + tp->size + dsize;
 
-	event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
-						  size, irq_flags, pc);
+	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
+						call->event.type,
+						size, irq_flags, pc);
 	if (!event)
 		return;
 
@@ -788,6 +921,19 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
 						irq_flags, pc, regs);
 }
 
+static __kprobes void
+kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
+		     struct pt_regs *regs)
+{
+	struct ftrace_event_file **file = tp->files;
+
+	/* Note: preempt is already disabled around the kprobe handler */
+	while (*file) {
+		__kretprobe_trace_func(tp, ri, regs, *file);
+		file++;
+	}
+}
+
 /* Event entry printers */
 enum print_line_t
 print_kprobe_event(struct trace_iterator *iter, int flags,
@@ -975,10 +1121,9 @@ static int set_print_fmt(struct trace_probe *tp)
 
 #ifdef CONFIG_PERF_EVENTS
 
 /* Kprobe profile handler */
-static __kprobes void kprobe_perf_func(struct kprobe *kp,
-					 struct pt_regs *regs)
+static __kprobes void
+kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs)
 {
-	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
 	struct ftrace_event_call *call = &tp->call;
 	struct kprobe_trace_entry_head *entry;
 	struct hlist_head *head;
@@ -997,7 +1142,7 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
 	if (!entry)
 		return;
 
-	entry->ip = (unsigned long)kp->addr;
+	entry->ip = (unsigned long)tp->rp.kp.addr;
 	memset(&entry[1], 0, dsize);
 	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
@@ -1007,10 +1152,10 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
 }
 
 /* Kretprobe profile handler */
-static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
-					  struct pt_regs *regs)
+static __kprobes void
+kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri,
+		    struct pt_regs *regs)
 {
-	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
 	struct ftrace_event_call *call = &tp->call;
 	struct kretprobe_trace_entry_head *entry;
 	struct hlist_head *head;
@@ -1044,20 +1189,19 @@ int kprobe_register(struct ftrace_event_call *event,
 		    enum trace_reg type, void *data)
 {
 	struct trace_probe *tp = (struct trace_probe *)event->data;
+	struct ftrace_event_file *file = data;
 
 	switch (type) {
 	case TRACE_REG_REGISTER:
-		return enable_trace_probe(tp, TP_FLAG_TRACE);
+		return enable_trace_probe(tp, file);
 	case TRACE_REG_UNREGISTER:
-		disable_trace_probe(tp, TP_FLAG_TRACE);
-		return 0;
+		return disable_trace_probe(tp, file);
 
 #ifdef CONFIG_PERF_EVENTS
 	case TRACE_REG_PERF_REGISTER:
-		return enable_trace_probe(tp, TP_FLAG_PROFILE);
+		return enable_trace_probe(tp, NULL);
 	case TRACE_REG_PERF_UNREGISTER:
-		disable_trace_probe(tp, TP_FLAG_PROFILE);
-		return 0;
+		return disable_trace_probe(tp, NULL);
 	case TRACE_REG_PERF_OPEN:
 	case TRACE_REG_PERF_CLOSE:
 	case TRACE_REG_PERF_ADD:
@@ -1073,11 +1217,13 @@ int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
 {
 	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
 
+	tp->nhit++;
+
 	if (tp->flags & TP_FLAG_TRACE)
-		kprobe_trace_func(kp, regs);
+		kprobe_trace_func(tp, regs);
 #ifdef CONFIG_PERF_EVENTS
 	if (tp->flags & TP_FLAG_PROFILE)
-		kprobe_perf_func(kp, regs);
+		kprobe_perf_func(tp, regs);
 #endif
 	return 0;	/* We don't tweek kernel, so just return 0 */
 }
@@ -1087,11 +1233,13 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
 {
 	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
 
+	tp->nhit++;
+
 	if (tp->flags & TP_FLAG_TRACE)
-		kretprobe_trace_func(ri, regs);
+		kretprobe_trace_func(tp, ri, regs);
 #ifdef CONFIG_PERF_EVENTS
 	if (tp->flags & TP_FLAG_PROFILE)
-		kretprobe_perf_func(ri, regs);
+		kretprobe_perf_func(tp, ri, regs);
 #endif
 	return 0;	/* We don't tweek kernel, so just return 0 */
 }
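With the handlers reworked to take the trace_probe directly, the dispatchers above become thin routers: the hit counter moves up into them so a hit is counted exactly once, and the event then fans out to whichever consumers (trace, perf) are active in tp->flags. A minimal sketch of that dispatch shape (illustrative types, not kernel code):

#include <stdint.h>

#define TP_FLAG_TRACE	1
#define TP_FLAG_PROFILE	2

struct probe {
	unsigned int	flags;
	uint64_t	nhit;
};

static void trace_func(struct probe *p)	{ /* ring-buffer consumer */ }
static void perf_func(struct probe *p)	{ /* perf consumer */ }

static int dispatch(struct probe *p)
{
	p->nhit++;			/* counted once per hit */

	if (p->flags & TP_FLAG_TRACE)
		trace_func(p);
	if (p->flags & TP_FLAG_PROFILE)
		perf_func(p);
	return 0;
}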
@@ -1189,11 +1337,24 @@ static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
 	return a1 + a2 + a3 + a4 + a5 + a6;
 }
 
+static struct ftrace_event_file *
+find_trace_probe_file(struct trace_probe *tp, struct trace_array *tr)
+{
+	struct ftrace_event_file *file;
+
+	list_for_each_entry(file, &tr->events, list)
+		if (file->event_call == &tp->call)
+			return file;
+
+	return NULL;
+}
+
 static __init int kprobe_trace_self_tests_init(void)
 {
 	int ret, warn = 0;
 	int (*target)(int, int, int, int, int, int);
 	struct trace_probe *tp;
+	struct ftrace_event_file *file;
 
 	target = kprobe_trace_selftest_target;
 
@@ -1203,31 +1364,43 @@ static __init int kprobe_trace_self_tests_init(void)
 				  "$stack $stack0 +0($stack)",
 				  create_trace_probe);
 	if (WARN_ON_ONCE(ret)) {
-		pr_warning("error on probing function entry.\n");
+		pr_warn("error on probing function entry.\n");
 		warn++;
 	} else {
 		/* Enable trace point */
 		tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
 		if (WARN_ON_ONCE(tp == NULL)) {
-			pr_warning("error on getting new probe.\n");
+			pr_warn("error on getting new probe.\n");
 			warn++;
-		} else
-			enable_trace_probe(tp, TP_FLAG_TRACE);
+		} else {
+			file = find_trace_probe_file(tp, top_trace_array());
+			if (WARN_ON_ONCE(file == NULL)) {
+				pr_warn("error on getting probe file.\n");
+				warn++;
+			} else
+				enable_trace_probe(tp, file);
+		}
 	}
 
 	ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
 				 "$retval", create_trace_probe);
 	if (WARN_ON_ONCE(ret)) {
-		pr_warning("error on probing function return.\n");
+		pr_warn("error on probing function return.\n");
 		warn++;
 	} else {
 		/* Enable trace point */
 		tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
 		if (WARN_ON_ONCE(tp == NULL)) {
-			pr_warning("error on getting new probe.\n");
+			pr_warn("error on getting 2nd new probe.\n");
 			warn++;
-		} else
-			enable_trace_probe(tp, TP_FLAG_TRACE);
+		} else {
+			file = find_trace_probe_file(tp, top_trace_array());
+			if (WARN_ON_ONCE(file == NULL)) {
+				pr_warn("error on getting probe file.\n");
+				warn++;
+			} else
+				enable_trace_probe(tp, file);
+		}
 	}
 
 	if (warn)
@@ -1238,27 +1411,39 @@ static __init int kprobe_trace_self_tests_init(void)
 
 	/* Disable trace points before removing it */
 	tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
 	if (WARN_ON_ONCE(tp == NULL)) {
-		pr_warning("error on getting test probe.\n");
+		pr_warn("error on getting test probe.\n");
 		warn++;
-	} else
-		disable_trace_probe(tp, TP_FLAG_TRACE);
+	} else {
+		file = find_trace_probe_file(tp, top_trace_array());
+		if (WARN_ON_ONCE(file == NULL)) {
+			pr_warn("error on getting probe file.\n");
+			warn++;
+		} else
+			disable_trace_probe(tp, file);
+	}
 
 	tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
 	if (WARN_ON_ONCE(tp == NULL)) {
-		pr_warning("error on getting 2nd test probe.\n");
+		pr_warn("error on getting 2nd test probe.\n");
 		warn++;
-	} else
-		disable_trace_probe(tp, TP_FLAG_TRACE);
+	} else {
+		file = find_trace_probe_file(tp, top_trace_array());
+		if (WARN_ON_ONCE(file == NULL)) {
+			pr_warn("error on getting probe file.\n");
+			warn++;
+		} else
+			disable_trace_probe(tp, file);
+	}
 
 	ret = traceprobe_command("-:testprobe", create_trace_probe);
 	if (WARN_ON_ONCE(ret)) {
-		pr_warning("error on deleting a probe.\n");
+		pr_warn("error on deleting a probe.\n");
 		warn++;
 	}
 
 	ret = traceprobe_command("-:testprobe2", create_trace_probe);
 	if (WARN_ON_ONCE(ret)) {
-		pr_warning("error on deleting a probe.\n");
+		pr_warn("error on deleting a probe.\n");
 		warn++;
 	}