/*
 * trace stack traces
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2004, 2005, Soeren Sandmann
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/hrtimer.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array	*sysprof_trace;
I
Ingo Molnar 已提交
20 21
static int __read_mostly	tracer_enabled;

22 23 24
/*
 * 10 msecs for now:
 */
I
Ingo Molnar 已提交
25
static const unsigned long sample_period = 1000000;
I
Ingo Molnar 已提交
26
static const unsigned int sample_max_depth = 512;
I
Ingo Molnar 已提交
27 28 29 30 31 32

/*
 * Per CPU hrtimers that do the profiling:
 */
static DEFINE_PER_CPU(struct hrtimer, stack_trace_hrtimer);

33 34 35 36 37 38 39
struct stack_frame {
	const void __user	*next_fp;
	unsigned long		return_address;
};

/*
 * Safely read one stack frame at user address @fp into @frame.
 *
 * Returns 1 on success, 0 if the address range is not accessible or the
 * copy faults.  Pagefaults are disabled around the copy so a bad user
 * pointer makes the copy fail instead of sleeping in the fault handler
 * (we are called from hrtimer/IRQ context).
 */
static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
	int ok = 0;

	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	pagefault_disable();
	if (!__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ok = 1;
	pagefault_enable();

	return ok;
}

static void timer_notify(struct pt_regs *regs, int cpu)
{
	struct trace_array_cpu *data;
	struct stack_frame frame;
	struct trace_array *tr;
I
Ingo Molnar 已提交
59
	const void __user *fp;
60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84
	int is_user;
	int i;

	if (!regs)
		return;

	tr = sysprof_trace;
	data = tr->data[cpu];
	is_user = user_mode(regs);

	if (!current || current->pid == 0)
		return;

	if (is_user && current->state != TASK_RUNNING)
		return;

	if (!is_user) {
		/* kernel */
		ftrace(tr, data, current->pid, 1, 0);
		return;

	}

	trace_special(tr, data, 0, current->pid, regs->ip);

I
Ingo Molnar 已提交
85
	fp = (void __user *)regs->bp;
86

I
Ingo Molnar 已提交
87
	for (i = 0; i < sample_max_depth; i++) {
I
Ingo Molnar 已提交
88 89 90
		frame.next_fp = 0;
		frame.return_address = 0;
		if (!copy_stack_frame(fp, &frame))
91
			break;
I
Ingo Molnar 已提交
92
		if ((unsigned long)fp < regs->sp)
93 94 95
			break;

		trace_special(tr, data, 1, frame.return_address,
I
Ingo Molnar 已提交
96 97
			      (unsigned long)fp);
		fp = frame.next_fp;
98 99 100 101
	}

	trace_special(tr, data, 2, current->pid, i);

I
Ingo Molnar 已提交
102 103 104
	/*
	 * Special trace entry if we overflow the max depth:
	 */
I
Ingo Molnar 已提交
105
	if (i == sample_max_depth)
106 107 108
		trace_special(tr, data, -1, -1, -1);
}

I
Ingo Molnar 已提交
109 110 111
static enum hrtimer_restart stack_trace_timer_fn(struct hrtimer *hrtimer)
{
	/* trace here */
112
	timer_notify(get_irq_regs(), smp_processor_id());
I
Ingo Molnar 已提交
113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156

	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	return HRTIMER_RESTART;
}

static void start_stack_timer(int cpu)
{
	struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);

	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = stack_trace_timer_fn;
	hrtimer->cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;

	hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL);
}

/*
 * Start a profiling timer on every online CPU.  We temporarily pin the
 * current task to each CPU in turn so each per-CPU hrtimer is armed
 * from the CPU it will fire on, then restore the original affinity.
 */
static void start_stack_timers(void)
{
	cpumask_t old_mask = current->cpus_allowed;
	int cpu;

	for_each_online_cpu(cpu) {
		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
		start_stack_timer(cpu);
	}

	set_cpus_allowed_ptr(current, &old_mask);
}

/* Cancel the profiling hrtimer belonging to @cpu. */
static void stop_stack_timer(int cpu)
{
	hrtimer_cancel(&per_cpu(stack_trace_hrtimer, cpu));
}

static void stop_stack_timers(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		stop_stack_timer(cpu);
}

I
Ingo Molnar 已提交
157 158 159 160 161 162 163 164 165 166 167 168 169
static notrace void stack_reset(struct trace_array *tr)
{
	int cpu;

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr->data[cpu]);
}

static notrace void start_stack_trace(struct trace_array *tr)
{
	stack_reset(tr);
I
Ingo Molnar 已提交
170
	start_stack_timers();
I
Ingo Molnar 已提交
171 172 173 174 175
	tracer_enabled = 1;
}

static notrace void stop_stack_trace(struct trace_array *tr)
{
I
Ingo Molnar 已提交
176
	stop_stack_timers();
I
Ingo Molnar 已提交
177 178 179 180 181
	tracer_enabled = 0;
}

static notrace void stack_trace_init(struct trace_array *tr)
{
182
	sysprof_trace = tr;
I
Ingo Molnar 已提交
183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209

	if (tr->ctrl)
		start_stack_trace(tr);
}

/* Tracer ->reset hook: stop profiling if it is currently running. */
static notrace void stack_trace_reset(struct trace_array *tr)
{
	if (tr->ctrl)
		stop_stack_trace(tr);
}

/* Tracer on/off switch: tr->ctrl carries the newly requested state. */
static void stack_trace_ctrl_update(struct trace_array *tr)
{
	/* When starting a new trace, reset the buffers */
	if (tr->ctrl)
		start_stack_trace(tr);
	else
		stop_stack_trace(tr);
}

/*
 * The sysprof tracer plugin, registered with the tracing core below.
 */
static struct tracer stack_trace __read_mostly =
{
	.name		= "sysprof",
	.init		= stack_trace_init,
	.reset		= stack_trace_reset,
	.ctrl_update	= stack_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_sysprof,
#endif
};

/* Register the sysprof tracer with the tracing core at boot time. */
__init static int init_stack_trace(void)
{
	return register_tracer(&stack_trace);
}
device_initcall(init_stack_trace);