Commit 756d17ee authored by jolsa@redhat.com, committed by Ingo Molnar

tracing: Support multiple pids in set_ftrace_pid file

Add the possibility to set more than one pid in the set_ftrace_pid
file, thus allowing more than one independent process to be traced.

Usage:

 sh-4.0# echo 284 > ./set_ftrace_pid
 sh-4.0# cat ./set_ftrace_pid
 284
 sh-4.0# echo 1 >> ./set_ftrace_pid
 sh-4.0# echo 0 >> ./set_ftrace_pid
 sh-4.0# cat ./set_ftrace_pid
 swapper tasks
 1
 284
 sh-4.0# echo 4 > ./set_ftrace_pid
 sh-4.0# cat ./set_ftrace_pid
 4
 sh-4.0# echo > ./set_ftrace_pid
 sh-4.0# cat ./set_ftrace_pid
 no pid
 sh-4.0#
Signed-off-by: Jiri Olsa <jolsa@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <20091013203425.565454612@goodmis.org>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent 194ec341
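
For illustration only (not part of the patch), the shell session above can also be driven from a small user-space C program. The sketch below assumes debugfs is mounted at /sys/kernel/debug, and the helper names add_pid()/clear_pids() are made up for this example; the behaviour it relies on comes from the patch itself: opening set_ftrace_pid with O_TRUNC (shell ">") resets the whole pid list in ftrace_pid_open(), while opening with O_APPEND (shell ">>") only appends.

/* Sketch: exercise set_ftrace_pid from user space.
 * Assumes debugfs is mounted at /sys/kernel/debug (adjust the path if not).
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define PID_FILE "/sys/kernel/debug/tracing/set_ftrace_pid"

/* Append one pid to the filter; 0 selects the swapper (idle) tasks. */
static int add_pid(int pid)
{
	char buf[32];
	int fd = open(PID_FILE, O_WRONLY | O_APPEND);

	if (fd < 0)
		return -1;
	snprintf(buf, sizeof(buf), "%d\n", pid);
	if (write(fd, buf, strlen(buf)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

/* Opening with O_TRUNC makes ftrace_pid_open() reset the whole list. */
static int clear_pids(void)
{
	int fd = open(PID_FILE, O_WRONLY | O_TRUNC);

	if (fd < 0)
		return -1;
	return close(fd);
}

int main(void)
{
	char buf[256];
	ssize_t n;
	int fd;

	add_pid(284);		/* hypothetical pid, as in the example above */
	add_pid(1);
	add_pid(0);		/* swapper tasks */

	fd = open(PID_FILE, O_RDONLY);
	if (fd < 0)
		return 1;
	while ((n = read(fd, buf, sizeof(buf) - 1)) > 0) {
		buf[n] = 0;
		fputs(buf, stdout);	/* e.g. "swapper tasks", "1", "284" */
	}
	close(fd);

	clear_pids();		/* back to "no pid" */
	return 0;
}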
@@ -60,6 +60,13 @@ static int last_ftrace_enabled;
 /* Quick disabling of function tracer. */
 int function_trace_stop;
 
+/* List for set_ftrace_pid's pids. */
+LIST_HEAD(ftrace_pids);
+struct ftrace_pid {
+	struct list_head list;
+	struct pid *pid;
+};
+
 /*
  * ftrace_disabled is set when an anomaly is discovered.
  * ftrace_disabled is much stronger than ftrace_enabled.
@@ -159,7 +166,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	else
 		func = ftrace_list_func;
 
-	if (ftrace_pid_trace) {
+	if (!list_empty(&ftrace_pids)) {
 		set_ftrace_pid_function(func);
 		func = ftrace_pid_func;
 	}
@@ -207,7 +214,7 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	if (ftrace_list->next == &ftrace_list_end) {
 		ftrace_func_t func = ftrace_list->func;
 
-		if (ftrace_pid_trace) {
+		if (!list_empty(&ftrace_pids)) {
 			set_ftrace_pid_function(func);
 			func = ftrace_pid_func;
 		}
@@ -235,7 +242,7 @@ static void ftrace_update_pid_func(void)
 	func = __ftrace_trace_function;
 #endif
 
-	if (ftrace_pid_trace) {
+	if (!list_empty(&ftrace_pids)) {
 		set_ftrace_pid_function(func);
 		func = ftrace_pid_func;
 	} else {
@@ -825,8 +832,6 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
 }
 #endif /* CONFIG_FUNCTION_PROFILER */
 
-/* set when tracing only a pid */
-struct pid *ftrace_pid_trace;
 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -2758,23 +2763,6 @@ static inline void ftrace_startup_enable(int command) { }
 # define ftrace_shutdown_sysctl()	do { } while (0)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
-static ssize_t
-ftrace_pid_read(struct file *file, char __user *ubuf,
-		       size_t cnt, loff_t *ppos)
-{
-	char buf[64];
-	int r;
-
-	if (ftrace_pid_trace == ftrace_swapper_pid)
-		r = sprintf(buf, "swapper tasks\n");
-	else if (ftrace_pid_trace)
-		r = sprintf(buf, "%u\n", pid_vnr(ftrace_pid_trace));
-	else
-		r = sprintf(buf, "no pid\n");
-
-	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-}
-
 static void clear_ftrace_swapper(void)
 {
 	struct task_struct *p;
@@ -2825,14 +2813,12 @@ static void set_ftrace_pid(struct pid *pid)
 	rcu_read_unlock();
 }
 
-static void clear_ftrace_pid_task(struct pid **pid)
+static void clear_ftrace_pid_task(struct pid *pid)
 {
-	if (*pid == ftrace_swapper_pid)
+	if (pid == ftrace_swapper_pid)
 		clear_ftrace_swapper();
 	else
-		clear_ftrace_pid(*pid);
-
-	*pid = NULL;
+		clear_ftrace_pid(pid);
 }
 
 static void set_ftrace_pid_task(struct pid *pid)
@@ -2843,11 +2829,140 @@ static void set_ftrace_pid_task(struct pid *pid)
 	set_ftrace_pid(pid);
 }
 
+static int ftrace_pid_add(int p)
+{
+	struct pid *pid;
+	struct ftrace_pid *fpid;
+	int ret = -EINVAL;
+
+	mutex_lock(&ftrace_lock);
+
+	if (!p)
+		pid = ftrace_swapper_pid;
+	else
+		pid = find_get_pid(p);
+
+	if (!pid)
+		goto out;
+
+	ret = 0;
+
+	list_for_each_entry(fpid, &ftrace_pids, list)
+		if (fpid->pid == pid)
+			goto out_put;
+
+	ret = -ENOMEM;
+
+	fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
+	if (!fpid)
+		goto out_put;
+
+	list_add(&fpid->list, &ftrace_pids);
+	fpid->pid = pid;
+
+	set_ftrace_pid_task(pid);
+
+	ftrace_update_pid_func();
+	ftrace_startup_enable(0);
+
+	mutex_unlock(&ftrace_lock);
+	return 0;
+
+out_put:
+	if (pid != ftrace_swapper_pid)
+		put_pid(pid);
+
+out:
+	mutex_unlock(&ftrace_lock);
+	return ret;
+}
+
+static void ftrace_pid_reset(void)
+{
+	struct ftrace_pid *fpid, *safe;
+
+	mutex_lock(&ftrace_lock);
+	list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
+		struct pid *pid = fpid->pid;
+
+		clear_ftrace_pid_task(pid);
+		list_del(&fpid->list);
+		kfree(fpid);
+	}
+
+	ftrace_update_pid_func();
+	ftrace_startup_enable(0);
+
+	mutex_unlock(&ftrace_lock);
+}
+
+static void *fpid_start(struct seq_file *m, loff_t *pos)
+{
+	mutex_lock(&ftrace_lock);
+
+	if (list_empty(&ftrace_pids) && (!*pos))
+		return (void *) 1;
+
+	return seq_list_start(&ftrace_pids, *pos);
+}
+
+static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	if (v == (void *)1)
+		return NULL;
+
+	return seq_list_next(v, &ftrace_pids, pos);
+}
+
+static void fpid_stop(struct seq_file *m, void *p)
+{
+	mutex_unlock(&ftrace_lock);
+}
+
+static int fpid_show(struct seq_file *m, void *v)
+{
+	const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
+
+	if (v == (void *)1) {
+		seq_printf(m, "no pid\n");
+		return 0;
+	}
+
+	if (fpid->pid == ftrace_swapper_pid)
+		seq_printf(m, "swapper tasks\n");
+	else
+		seq_printf(m, "%u\n", pid_vnr(fpid->pid));
+
+	return 0;
+}
+
+static const struct seq_operations ftrace_pid_sops = {
+	.start = fpid_start,
+	.next = fpid_next,
+	.stop = fpid_stop,
+	.show = fpid_show,
+};
+
+static int
+ftrace_pid_open(struct inode *inode, struct file *file)
+{
+	int ret = 0;
+
+	if ((file->f_mode & FMODE_WRITE) &&
+	    (file->f_flags & O_TRUNC))
+		ftrace_pid_reset();
+
+	if (file->f_mode & FMODE_READ)
+		ret = seq_open(file, &ftrace_pid_sops);
+
+	return ret;
+}
+
 static ssize_t
 ftrace_pid_write(struct file *filp, const char __user *ubuf,
 		   size_t cnt, loff_t *ppos)
 {
-	struct pid *pid;
 	char buf[64];
 	long val;
 	int ret;
@@ -2860,57 +2975,38 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
 	buf[cnt] = 0;
 
+	/*
+	 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
+	 * to clean the filter quietly.
+	 */
+	strstrip(buf);
+	if (strlen(buf) == 0)
+		return 1;
+
 	ret = strict_strtol(buf, 10, &val);
 	if (ret < 0)
 		return ret;
 
-	mutex_lock(&ftrace_lock);
-	if (val < 0) {
-		/* disable pid tracing */
-		if (!ftrace_pid_trace)
-			goto out;
-
-		clear_ftrace_pid_task(&ftrace_pid_trace);
-
-	} else {
-		/* swapper task is special */
-		if (!val) {
-			pid = ftrace_swapper_pid;
-			if (pid == ftrace_pid_trace)
-				goto out;
-		} else {
-			pid = find_get_pid(val);
-			if (pid == ftrace_pid_trace) {
-				put_pid(pid);
-				goto out;
-			}
-		}
-
-		if (ftrace_pid_trace)
-			clear_ftrace_pid_task(&ftrace_pid_trace);
-
-		if (!pid)
-			goto out;
-
-		ftrace_pid_trace = pid;
-		set_ftrace_pid_task(ftrace_pid_trace);
-	}
-
-	/* update the function call */
-	ftrace_update_pid_func();
-	ftrace_startup_enable(0);
-
- out:
-	mutex_unlock(&ftrace_lock);
+	ret = ftrace_pid_add(val);
 
-	return cnt;
+	return ret ? ret : cnt;
+}
+
+static int
+ftrace_pid_release(struct inode *inode, struct file *file)
+{
+	if (file->f_mode & FMODE_READ)
+		seq_release(inode, file);
+
+	return 0;
 }
 
 static const struct file_operations ftrace_pid_fops = {
-	.read = ftrace_pid_read,
+	.open = ftrace_pid_open,
 	.write = ftrace_pid_write,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = ftrace_pid_release,
 };
 
 static __init int ftrace_init_debugfs(void)
...
@@ -496,12 +496,12 @@ print_graph_function(struct trace_iterator *iter)
 }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
-extern struct pid *ftrace_pid_trace;
+extern struct list_head ftrace_pids;
 
 #ifdef CONFIG_FUNCTION_TRACER
 static inline int ftrace_trace_task(struct task_struct *task)
 {
-	if (!ftrace_pid_trace)
+	if (list_empty(&ftrace_pids))
 		return 1;
 
 	return test_tsk_trace_trace(task);
...
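
Note: ftrace_trace_task() above only consults a per-task flag via test_tsk_trace_trace(); the flag itself is maintained by the set_ftrace_pid_task()/clear_ftrace_pid_task() helpers that ftrace_pid_add() and ftrace_pid_reset() call. As a rough sketch of that pattern (the function name below is made up; the real ftrace.c helpers also special-case ftrace_swapper_pid, which covers every CPU's idle task):

/* Sketch only -- mirrors the pattern the set_ftrace_pid() helper uses.
 * Needs <linux/pid.h>, <linux/sched.h> and <linux/rcupdate.h>.
 */
static void mark_pid_tasks_for_tracing(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	/* Walk every task attached to this struct pid ... */
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		/* ... and set the per-task flag ftrace_trace_task() tests. */
		set_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();
}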