/*
 * h/w branch tracer for x86 based on BTS
 *
 * Copyright (C) 2008-2009 Intel Corporation.
 * Markus Metzger <markus.t.metzger@gmail.com>, 2008-2009
 */
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/fs.h>

#include <asm/ds.h>

#include "trace_output.h"
#include "trace.h"


#define BTS_BUFFER_SIZE (1 << 13)

/*
 * The tracer lock protects the below per-cpu tracer array.
 * It needs to be held to:
 * - start tracing on all cpus
 * - stop tracing on all cpus
 * - start tracing on a single hotplug cpu
 * - stop tracing on a single hotplug cpu
 * - read the trace from all cpus
 * - read the trace from a single cpu
 */
static DEFINE_SPINLOCK(bts_tracer_lock);
static DEFINE_PER_CPU(struct bts_tracer *, tracer);
static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], buffer);

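/*
 * Shorthands for the current cpu's tracer handle and BTS buffer. They use
 * smp_processor_id() and must therefore only be used with preemption
 * disabled, e.g. from on_each_cpu() or smp_call_function_single() context.
 */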
#define this_tracer per_cpu(tracer, smp_processor_id())
#define this_buffer per_cpu(buffer, smp_processor_id())

static int trace_hw_branches_enabled __read_mostly;
static int trace_hw_branches_suspended __read_mostly;
static struct trace_array *hw_branch_trace __read_mostly;


/*
 * Initialize the tracer for the current cpu.
 * The argument is ignored.
 *
 * pre: bts_tracer_lock must be locked.
 */
static void bts_trace_init_cpu(void *arg)
{
	if (this_tracer)
		ds_release_bts(this_tracer);

	this_tracer = ds_request_bts(NULL, this_buffer, BTS_BUFFER_SIZE,
				     NULL, (size_t)-1, BTS_KERNEL);
	if (IS_ERR(this_tracer)) {
		this_tracer = NULL;
		return;
	}
}

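/*
 * Tracer init callback: request a BTS tracer on every online cpu.
 *
 * Tracing is enabled if at least one cpu could be set up; otherwise
 * -EOPNOTSUPP is returned and the tracer cannot be selected.
 */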
static int bts_trace_init(struct trace_array *tr)
{
	int cpu, avail;

	spin_lock(&bts_tracer_lock);

	hw_branch_trace = tr;

	on_each_cpu(bts_trace_init_cpu, NULL, 1);

	/* Check on how many cpus we could enable tracing */
	avail = 0;
	for_each_online_cpu(cpu)
		if (per_cpu(tracer, cpu))
			avail++;

	trace_hw_branches_enabled = (avail ? 1 : 0);
	trace_hw_branches_suspended = 0;

	spin_unlock(&bts_tracer_lock);

	/* If we could not enable tracing on a single cpu, we fail. */
	return avail ? 0 : -EOPNOTSUPP;
}

/*
 * Release the tracer for the current cpu.
 * The argument is ignored.
 *
 * pre: bts_tracer_lock must be locked.
 */
static void bts_trace_release_cpu(void *arg)
{
	if (this_tracer) {
		ds_release_bts(this_tracer);
		this_tracer = NULL;
	}
}

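/*
 * Tracer reset callback: release the BTS tracers on all cpus and mark
 * tracing as disabled.
 */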
static void bts_trace_reset(struct trace_array *tr)
{
	spin_lock(&bts_tracer_lock);

	on_each_cpu(bts_trace_release_cpu, NULL, 1);
	trace_hw_branches_enabled = 0;
	trace_hw_branches_suspended = 0;

	spin_unlock(&bts_tracer_lock);
}

/*
 * Resume tracing on the current cpu.
 * The argument is ignored.
 *
 * pre: bts_tracer_lock must be locked.
 */
static void bts_trace_resume_cpu(void *arg)
{
	if (this_tracer)
		ds_resume_bts(this_tracer);
}

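/* Tracer start callback: resume BTS tracing on all cpus. */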
static void bts_trace_start(struct trace_array *tr)
{
	spin_lock(&bts_tracer_lock);

	on_each_cpu(bts_trace_resume_cpu, NULL, 1);
	trace_hw_branches_suspended = 0;

	spin_unlock(&bts_tracer_lock);
}

/*
 * Suspend tracing on the current cpu.
 * The argument is ignored.
 *
 * pre: bts_tracer_lock must be locked.
 */
static void bts_trace_suspend_cpu(void *arg)
{
	if (this_tracer)
		ds_suspend_bts(this_tracer);
}

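/*
 * Tracer stop callback: suspend BTS tracing on all cpus without releasing
 * the tracers, so tracing can be resumed later.
 */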
static void bts_trace_stop(struct trace_array *tr)
{
	spin_lock(&bts_tracer_lock);

	on_each_cpu(bts_trace_suspend_cpu, NULL, 1);
	trace_hw_branches_suspended = 1;

	spin_unlock(&bts_tracer_lock);
}

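/*
 * CPU hotplug notifier: set up BTS tracing on a cpu coming online (and
 * suspend it again if tracing is currently suspended) and release the
 * tracer before a cpu goes offline.
 */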
static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
				     unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	spin_lock(&bts_tracer_lock);

	if (!trace_hw_branches_enabled)
		goto out;

	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		smp_call_function_single(cpu, bts_trace_init_cpu, NULL, 1);

		if (trace_hw_branches_suspended)
			smp_call_function_single(cpu, bts_trace_suspend_cpu,
						 NULL, 1);
		break;
	case CPU_DOWN_PREPARE:
		smp_call_function_single(cpu, bts_trace_release_cpu, NULL, 1);
		break;
	}

 out:
	spin_unlock(&bts_tracer_lock);
	return NOTIFY_DONE;
}

static struct notifier_block bts_hotcpu_notifier __cpuinitdata = {
	.notifier_call = bts_hotcpu_handler
};

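/* Print the header line shown at the top of the trace output. */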
static void bts_trace_print_header(struct seq_file *m)
{
	seq_puts(m, "# CPU#        TO  <-  FROM\n");
}

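/*
 * Format a single TRACE_HW_BRANCHES entry as
 * "<cpu>  <to symbol>  <-  <from symbol>".
 */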
static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
{
	unsigned long symflags = TRACE_ITER_SYM_OFFSET;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *seq = &iter->seq;
	struct hw_branch_entry *it;

	trace_assign_type(it, entry);

	if (entry->type == TRACE_HW_BRANCHES) {
		if (trace_seq_printf(seq, "%4d  ", iter->cpu) &&
		    seq_print_ip_sym(seq, it->to, symflags) &&
		    trace_seq_printf(seq, "\t  <-  ") &&
		    seq_print_ip_sym(seq, it->from, symflags) &&
		    trace_seq_printf(seq, "\n"))
			return TRACE_TYPE_HANDLED;
		return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_UNHANDLED;
}

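/*
 * Write one branch record (from -> to) into the ftrace ring buffer of the
 * current cpu. The per-cpu 'disabled' counter guards against recursion
 * while the entry is reserved and committed.
 */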
void trace_hw_branch(u64 from, u64 to)
{
	struct trace_array *tr = hw_branch_trace;
	struct ring_buffer_event *event;
	struct hw_branch_entry *entry;
	unsigned long irq1;
	int cpu;

	if (unlikely(!tr))
		return;

	if (unlikely(!trace_hw_branches_enabled))
		return;

	local_irq_save(irq1);
	cpu = raw_smp_processor_id();
	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
		goto out;

	event = trace_buffer_lock_reserve(tr, TRACE_HW_BRANCHES,
					  sizeof(*entry), 0, 0);
	if (!event)
		goto out;
	entry	= ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, from);
	entry->ent.type = TRACE_HW_BRANCHES;
	entry->from = from;
	entry->to   = to;
	trace_buffer_unlock_commit(tr, event, 0, 0);

 out:
	atomic_dec(&tr->data[cpu]->disabled);
	local_irq_restore(irq1);
}

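/*
 * Decode a single raw BTS record at 'at' and, if it describes a branch,
 * feed it to trace_hw_branch(). Other record types are ignored.
 */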
static void trace_bts_at(const struct bts_trace *trace, void *at)
{
	struct bts_struct bts;
	int err = 0;

	WARN_ON_ONCE(!trace->read);
	if (!trace->read)
		return;

	err = trace->read(this_tracer, at, &bts);
	if (err < 0)
		return;

	switch (bts.qualifier) {
	case BTS_BRANCH:
		trace_hw_branch(bts.variant.lbr.from, bts.variant.lbr.to);
		break;
	}
}

/*
 * Collect the trace on the current cpu and write it into the ftrace buffer.
 *
 * pre: bts_tracer_lock must be locked
 */
static void trace_bts_cpu(void *arg)
{
	struct trace_array *tr = (struct trace_array *)arg;
	const struct bts_trace *trace;
	unsigned char *at;

	if (unlikely(!tr))
		return;

	if (unlikely(atomic_read(&tr->data[raw_smp_processor_id()]->disabled)))
		return;

	if (unlikely(!this_tracer))
		return;

	ds_suspend_bts(this_tracer);
	trace = ds_read_bts(this_tracer);
	if (!trace)
		goto out;

	for (at = trace->ds.top; (void *)at < trace->ds.end;
	     at += trace->ds.size)
		trace_bts_at(trace, at);

	for (at = trace->ds.begin; (void *)at < trace->ds.top;
	     at += trace->ds.size)
		trace_bts_at(trace, at);

out:
	ds_resume_bts(this_tracer);
}

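/*
 * Open callback: flush the BTS buffers of all cpus into the ftrace buffer
 * before the trace is read.
 */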
static void trace_bts_prepare(struct trace_iterator *iter)
{
	spin_lock(&bts_tracer_lock);

	on_each_cpu(trace_bts_cpu, iter->tr, 1);

	spin_unlock(&bts_tracer_lock);
}

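/* Close callback: discard the collected trace once it has been read. */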
static void trace_bts_close(struct trace_iterator *iter)
{
	tracing_reset_online_cpus(iter->tr);
}

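/*
 * Dump the branch trace of the current cpu into the ftrace buffer; meant
 * to be called from the oops path so the branches leading up to a crash
 * are preserved.
 */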
void trace_hw_branch_oops(void)
{
	spin_lock(&bts_tracer_lock);

	if (trace_hw_branches_enabled)
		trace_bts_cpu(hw_branch_trace);

	spin_unlock(&bts_tracer_lock);
}

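/*
 * The tracer is selected like any other ftrace tracer by writing its name
 * to the current_tracer file, e.g.
 *   echo hw-branch-tracer > /sys/kernel/debug/tracing/current_tracer
 */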
struct tracer bts_tracer __read_mostly =
{
	.name		= "hw-branch-tracer",
	.init		= bts_trace_init,
	.reset		= bts_trace_reset,
	.print_header	= bts_trace_print_header,
	.print_line	= bts_trace_print_line,
	.start		= bts_trace_start,
	.stop		= bts_trace_stop,
	.open		= trace_bts_prepare,
	.close		= trace_bts_close,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_hw_branches,
#endif /* CONFIG_FTRACE_SELFTEST */
};

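/*
 * Register the cpu hotplug notifier and the tracer itself at
 * device_initcall time.
 */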
__init static int init_bts_trace(void)
{
	register_hotcpu_notifier(&bts_hotcpu_notifier);
	return register_tracer(&bts_tracer);
}
device_initcall(init_bts_trace);