/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
int ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will peek into the ring buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring buffer (such as trace_printk) could
 * occur at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int dummy_set_flag(u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurs.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly	tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it by either specifying
 * "ftrace_dump_on_oops" on the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops.
 * Set it to 1 to dump the buffers of all CPUs.
 * Set it to 2 to dump only the buffer of the CPU that triggered the oops.
 */

enum ftrace_dump_mode ftrace_dump_on_oops;
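
/*
 * Illustrative examples: booting with "ftrace_dump_on_oops=orig_cpu"
 * selects DUMP_ORIG, while at run time
 *   echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 * selects DUMP_ALL; see set_ftrace_dump_on_oops() below for the parsing.
 */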

static int tracing_set_tracer(const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static int __init set_cmdline_ftrace(char *str)
{
	strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = 1;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strncpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}
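
/*
 * Note: the +500 above rounds to the nearest microsecond, e.g.
 * ns2usecs(1499) == 1 while ns2usecs(1500) == 2.
 */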

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array	global_trace;

LIST_HEAD(ftrace_trace_arrays);

int filter_current_check_discard(struct ring_buffer *buffer,
				 struct ftrace_event_call *call, void *rec,
				 struct ring_buffer_event *event)
{
	return filter_check_discard(call, rec, buffer, event);
}
EXPORT_SYMBOL_GPL(filter_current_check_discard);

cycle_t ftrace_now(int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!global_trace.trace_buffer.buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(global_trace.trace_buffer.buffer, cpu);
	ring_buffer_normalize_time_stamp(global_trace.trace_buffer.buffer, cpu, &ts);

	return ts;
}

int tracing_is_enabled(void)
{
	return tracing_is_on();
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
static DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek()
 * etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in ring buffer, and this page will be rewritten
 *      by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to system.
 *
 * These primitives allow multiple processes to access different cpu ring
 * buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
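
/*
 * Typical usage (sketch): a reader of cpu N's buffer brackets its
 * accesses with trace_access_lock(N)/trace_access_unlock(N), while a
 * reader that walks every buffer passes RING_BUFFER_ALL_CPUS instead,
 * which excludes all per-cpu readers at once.
 */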

/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS;

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	if (global_trace.trace_buffer.buffer)
		ring_buffer_record_on(global_trace.trace_buffer.buffer);
	/*
	 * This flag is only looked at when buffers haven't been
	 * allocated yet. We don't really care about the race
	 * between setting this flag and actually turning
	 * on the buffer.
	 */
	global_trace.buffer_disabled = 0;
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	if (global_trace.trace_buffer.buffer)
		ring_buffer_record_off(global_trace.trace_buffer.buffer);
	/*
	 * This flag is only looked at when buffers haven't been
	 * allocated yet. We don't really care about the race
	 * between setting this flag and actually turning
	 * on the buffer.
	 */
	global_trace.buffer_disabled = 1;
}
EXPORT_SYMBOL_GPL(tracing_off);

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	if (global_trace.trace_buffer.buffer)
		return ring_buffer_record_is_on(global_trace.trace_buffer.buffer);
	return !global_trace.buffer_disabled;
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries cannot be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);
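
/*
 * Illustrative example: booting with "trace_buf_size=4M" requests a
 * 4 MiB per-cpu buffer; memparse() above accepts the usual K/M/G
 * suffixes.
 */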

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	"overwrite",
	"disable_on_free",
	"irq-info",
	"markers",
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,	"local",	1 },
	{ trace_clock_global,	"global",	1 },
	{ trace_clock_counter,	"counter",	0 },
	ARCH_TRACE_CLOCKS
};

int trace_clock_id;
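
/*
 * The active entry in trace_clocks[] is selected at run time through
 * the debugfs "trace_clock" file, e.g. (illustrative):
 *   echo global > /sys/kernel/debug/tracing/trace_clock
 */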

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
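
/*
 * Typical caller pattern (sketch): a debugfs ->write() handler loops
 * on trace_get_user() and receives one space-separated token per call
 * in parser->buffer, with parser->cont set when a token is still
 * incomplete at the end of the write.
 */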

ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
	int len;
	int ret;

	if (!cnt)
		return 0;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
	if (ret == cnt)
		return -EFAULT;

	cnt -= ret;

	s->readpos += cnt;
	return cnt;
}

static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->readpos, cnt);

	s->readpos += cnt;
	return cnt;
}

/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as an arch_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 *
 * It is also used in other places outside the update_max_tr
 * so it needs to be defined outside of the
 * CONFIG_TRACER_MAX_TRACE.
 */
static arch_spinlock_t ftrace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
unsigned long __read_mostly	tracing_max_latency;

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tracing_max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	max_data->uid = task_uid(tsk);
	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->current_trace->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&ftrace_max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (WARN_ON_ONCE(!tr->current_trace->allocated_snapshot))
		return;

	arch_spin_lock(&ftrace_max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static void default_wait_pipe(struct trace_iterator *iter)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return;

	ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
}

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;
	if (!type->wait_pipe)
		type->wait_pipe = default_wait_pipe;

#ifdef CONFIG_FTRACE_STARTUP_TEST
	if (type->selftest && !tracing_selftest_disabled) {
		struct trace_array *tr = &global_trace;
		struct tracer *saved_tracer = tr->current_trace;

		/*
		 * Run a selftest on this tracer.
		 * Here we reset the trace buffer, and set the current
		 * tracer to be this tracer. The tracer can then run some
		 * internal tracing to verify that everything is in order.
		 * If we fail, we do not register this tracer.
		 */
		tracing_reset_online_cpus(&tr->trace_buffer);

		tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
		if (type->use_max_tr) {
			/* If we expanded the buffers, make sure the max is expanded too */
			if (ring_buffer_expanded)
				ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
						   RING_BUFFER_ALL_CPUS);
			type->allocated_snapshot = true;
		}
#endif

		/* the test is responsible for initializing and enabling */
		pr_info("Testing tracer %s: ", type->name);
		ret = type->selftest(type, tr);
		/* the test is responsible for resetting too */
		tr->current_trace = saved_tracer;
		if (ret) {
			printk(KERN_CONT "FAILED!\n");
			/* Add the warning after printing 'FAILED' */
			WARN_ON(1);
			goto out;
		}
		/* Only reset on passing, to avoid touching corrupted buffers */
		tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
		if (type->use_max_tr) {
			type->allocated_snapshot = false;

			/* Shrink the max buffer again */
			if (ring_buffer_expanded)
				ring_buffer_resize(tr->max_buffer.buffer, 1,
						   RING_BUFFER_ALL_CPUS);
		}
#endif

		printk(KERN_CONT "PASSED\n");
	}
#endif

	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break them. */
	tracing_selftest_disabled = 1;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}

void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = ftrace_now(buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_current(int cpu)
{
	tracing_reset(&global_trace.trace_buffer, cpu);
}

void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
	mutex_unlock(&trace_types_lock);
}

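/*
 * map_pid_to_cmdline[] and map_cmdline_to_pid[] below are kept as a
 * pair of inverse maps: when a saved_cmdlines slot is recycled for a
 * new pid, the old pid's forward mapping can be cleared so it does not
 * read the new task's comm (see trace_save_cmdline() below).
 */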
#define SAVED_CMDLINES 128
#define NO_CMDLINE_MAP UINT_MAX
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;

/* temporarily disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}

int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}

/**
 * ftrace_off_permanent - disable all ftrace code permanently
 *
 * This should only be called when a serious anomaly has
 * been detected.  This will turn off the function tracing,
 * ring buffers, and other tracing utilities. It takes no
 * locks and can be called from any context.
 */
void ftrace_off_permanent(void)
{
	tracing_disabled = 1;
	ftrace_stop();
	tracing_off_permanent();
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&ftrace_max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&ftrace_max_lock);

	ftrace_start();
 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_start_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	ftrace_stop();
	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&ftrace_max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&ftrace_max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_stop_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	if (tr->stop_count++)
		goto out;

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

void trace_stop_cmdline_recording(void);

static void trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock))
		return;

	idx = map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		map_cmdline_to_pid[idx] = tsk->pid;
		map_pid_to_cmdline[tsk->pid] = idx;

		cmdline_idx = idx;
	}

	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

	arch_spin_unlock(&trace_cmdline_lock);
}

void trace_find_cmdline(int pid, char comm[])
{
	unsigned map;

	if (!pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	if (pid > PID_MAX_DEFAULT) {
		strcpy(comm, "<...>");
		return;
	}

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);
	map = map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		strcpy(comm, saved_cmdlines[map]);
	else
		strcpy(comm, "<...>");

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
		return;

	if (!__this_cpu_read(trace_cmdline_save))
		return;

	__this_cpu_write(trace_cmdline_save, false);

	trace_save_cmdline(tsk);
}

void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count		= pc & 0xff;
	entry->pid			= (tsk) ? tsk->pid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL) {
		struct trace_entry *ent = ring_buffer_event_data(event);

		tracing_generic_entry_update(ent, flags, pc);
		ent->type = type;
	}

	return event;
}
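
/*
 * Writers follow the reserve/commit pattern: reserve an event with
 * trace_buffer_lock_reserve(), fill in the entry returned by
 * ring_buffer_event_data(), then commit it with
 * __buffer_unlock_commit() (trace_function() below is a
 * representative caller).
 */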

void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_cmdline_save, true);
	ring_buffer_unlock_commit(buffer, event);
}

static inline void
__trace_buffer_unlock_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     unsigned long flags, int pc)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(buffer, flags, 6, pc);
	ftrace_trace_userstack(buffer, flags, pc);
}

void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
			  struct ftrace_event_file *ftrace_file,
			  int type, unsigned long len,
			  unsigned long flags, int pc)
{
	*current_rb = ftrace_file->tr->trace_buffer.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);

struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
				  int type, unsigned long len,
				  unsigned long flags, int pc)
{
	*current_rb = global_trace.trace_buffer.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);

void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
					struct ring_buffer_event *event,
					unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);

void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);

void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event)
{
	ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);

void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct ftrace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	/* If we are reading the ring buffer, don't trace */
	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->ip			= ip;
	entry->parent_ip		= parent_ip;

	if (!filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}

void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip, unsigned long flags,
       int pc)
{
	if (likely(!atomic_read(&data->disabled)))
		trace_function(tr, ip, parent_ip, flags, pc);
}

#ifdef CONFIG_STACKTRACE

#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
struct ftrace_stack {
	unsigned long		calls[FTRACE_STACK_MAX_ENTRIES];
};

static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);

static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs)
{
	struct ftrace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	int use_stack;
	int size = FTRACE_STACK_ENTRIES;

	trace.nr_entries	= 0;
	trace.skip		= skip;

	/*
	 * Since events can happen in NMIs there's no safe way to
	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
	 * or NMI comes in, it will just have to use the default
	 * FTRACE_STACK_SIZE.
	 */
	preempt_disable_notrace();

	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
	/*
	 * We don't need any atomic variables, just a barrier.
	 * If an interrupt comes in, we don't care, because it would
	 * have exited and put the counter back to what we want.
	 * We just need a barrier to keep gcc from moving things
	 * around.
	 */
	barrier();
	if (use_stack == 1) {
		trace.entries		= &__get_cpu_var(ftrace_stack).calls[0];
		trace.max_entries	= FTRACE_STACK_MAX_ENTRIES;

		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);

		if (trace.nr_entries > size)
			size = trace.nr_entries;
	} else
		/* From now on, use_stack is a boolean */
		use_stack = 0;

	size *= sizeof(unsigned long);

	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
					  sizeof(*entry) + size, flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	memset(&entry->caller, 0, size);

	if (use_stack)
		memcpy(&entry->caller, trace.entries,
		       trace.nr_entries * sizeof(unsigned long));
	else {
		trace.max_entries	= FTRACE_STACK_ENTRIES;
		trace.entries		= entry->caller;
		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);
	}

	entry->size = trace.nr_entries;

	if (!filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out:
	/* Again, don't let gcc optimize things here */
	barrier();
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();

}

void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
			     int skip, int pc, struct pt_regs *regs)
{
	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
}

void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
			int skip, int pc)
{
	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
}

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc)
{
	__ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
}

/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 */
void trace_dump_stack(void)
{
	unsigned long flags;

	if (tracing_disabled || tracing_selftest_running)
		return;

	local_save_flags(flags);

	/* skipping 3 traces, seems to get us at the caller of this function */
	__ftrace_trace_stack(global_trace.trace_buffer.buffer, flags, 3,
			     preempt_count(), NULL);
}

static DEFINE_PER_CPU(int, user_stack_count);

void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	/*
	 * NMIs cannot handle page faults, even with fixups.
	 * Saving the user stack can (and often does) fault.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
	 */
	preempt_disable();
	if (__this_cpu_read(user_stack_count))
		goto out;

	__this_cpu_inc(user_stack_count);

	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					  sizeof(*entry), flags, pc);
	if (!event)
		goto out_drop_count;
	entry	= ring_buffer_event_data(event);

	entry->tgid		= current->tgid;
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries	= 0;
	trace.max_entries	= FTRACE_STACK_ENTRIES;
	trace.skip		= 0;
	trace.entries		= entry->caller;

	save_stack_trace_user(&trace);
	if (!filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out_drop_count:
	__this_cpu_dec(user_stack_count);
 out:
	preempt_enable();
}

#ifdef UNUSED
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
	ftrace_trace_userstack(tr, flags, preempt_count());
}
#endif /* UNUSED */

#endif /* CONFIG_STACKTRACE */

/* created for use with alloc_percpu */
struct trace_buffer_struct {
	char buffer[TRACE_BUF_SIZE];
};

static struct trace_buffer_struct *trace_percpu_buffer;
static struct trace_buffer_struct *trace_percpu_sirq_buffer;
static struct trace_buffer_struct *trace_percpu_irq_buffer;
static struct trace_buffer_struct *trace_percpu_nmi_buffer;

/*
 * The buffer used is dependent on the context. There is a per cpu
 * buffer for normal context, softirq context, hard irq context and
 * for NMI context. This allows for lockless recording.
 *
 * Note, if the buffers failed to be allocated, then this returns NULL
 */
static char *get_trace_buf(void)
{
	struct trace_buffer_struct *percpu_buffer;

	/*
	 * If we have allocated per cpu buffers, then we do not
	 * need to do any locking.
	 */
	if (in_nmi())
		percpu_buffer = trace_percpu_nmi_buffer;
	else if (in_irq())
		percpu_buffer = trace_percpu_irq_buffer;
	else if (in_softirq())
		percpu_buffer = trace_percpu_sirq_buffer;
	else
		percpu_buffer = trace_percpu_buffer;

	if (!percpu_buffer)
		return NULL;

	return this_cpu_ptr(&percpu_buffer->buffer[0]);
}

static int alloc_percpu_trace_buffer(void)
{
	struct trace_buffer_struct *buffers;
	struct trace_buffer_struct *sirq_buffers;
	struct trace_buffer_struct *irq_buffers;
	struct trace_buffer_struct *nmi_buffers;

	buffers = alloc_percpu(struct trace_buffer_struct);
	if (!buffers)
		goto err_warn;

	sirq_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!sirq_buffers)
		goto err_sirq;

	irq_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!irq_buffers)
		goto err_irq;

	nmi_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!nmi_buffers)
		goto err_nmi;

	trace_percpu_buffer = buffers;
	trace_percpu_sirq_buffer = sirq_buffers;
	trace_percpu_irq_buffer = irq_buffers;
	trace_percpu_nmi_buffer = nmi_buffers;

	return 0;

 err_nmi:
	free_percpu(irq_buffers);
 err_irq:
	free_percpu(sirq_buffers);
 err_sirq:
	free_percpu(buffers);
 err_warn:
	WARN(1, "Could not allocate percpu trace_printk buffer");
	return -ENOMEM;
}

static int buffers_allocated;

void trace_printk_init_buffers(void)
{
	if (buffers_allocated)
		return;

	if (alloc_percpu_trace_buffer())
		return;

	pr_info("ftrace: Allocated trace_printk buffers\n");

	/* Expand the buffers to set size */
	tracing_update_buffers();

	buffers_allocated = 1;

	/*
	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here. If the global_trace.buffer is already
	 * allocated here, then this was called by module code.
	 */
	if (global_trace.trace_buffer.buffer)
}

void trace_printk_start_comm(void)
{
	/* Start tracing comms if trace printk is set */
	if (!buffers_allocated)
		return;
	tracing_start_cmdline_record();
}

static void trace_printk_start_stop_comm(int enabled)
{
	if (!buffers_allocated)
		return;

	if (enabled)
		tracing_start_cmdline_record();
	else
		tracing_stop_cmdline_record();
}

/**
 * trace_vbprintk - write binary msg to tracing buffer
 *
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	struct ftrace_event_call *call = &event_bprint;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct bprint_entry *entry;
	unsigned long flags;
	char *tbuffer;
	int len = 0, size, pc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);

	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip			= ip;
	entry->fmt			= fmt;

	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
	if (!filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(buffer, flags, 6, pc);
	}

out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
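
/*
 * Note: vbin_printf() above stores the raw argument words rather than
 * formatted text; the format string pointer is kept in the entry and
 * the text is only rendered when the buffer is read, keeping the
 * write path cheap.
 */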

static int
__trace_array_vprintk(struct ring_buffer *buffer,
		      unsigned long ip, const char *fmt, va_list args)
{
	struct ftrace_event_call *call = &event_print;
	struct ring_buffer_event *event;
	int len = 0, size, pc;
	struct print_entry *entry;
	unsigned long flags;
	char *tbuffer;

	if (tracing_disabled || tracing_selftest_running)
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
	if (len > TRACE_BUF_SIZE)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + len + 1;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, tbuffer, len);
	entry->buf[len] = '\0';
	if (!filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(buffer, flags, 6, pc);
	}
 out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}

int trace_array_vprintk(struct trace_array *tr,
			unsigned long ip, const char *fmt, va_list args)
{
	return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
}

int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = trace_array_vprintk(tr, ip, fmt, ap);
	va_end(ap);
	return ret;
}

int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
	va_end(ap);
	return ret;
}

int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
	return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);

static void trace_iterator_increment(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);

	iter->idx++;
	if (buf_iter)
		ring_buffer_read(buf_iter, NULL);
}

static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
		unsigned long *lost_events)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);

	if (buf_iter)
		event = ring_buffer_iter_peek(buf_iter, ts);
	else
		event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
					 lost_events);

	if (event) {
		iter->ent_size = ring_buffer_event_length(event);
		return ring_buffer_event_data(event);
	}
	iter->ent_size = 0;
	return NULL;
}

static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
		  unsigned long *missing_events, u64 *ent_ts)
{
	struct ring_buffer *buffer = iter->trace_buffer->buffer;
	struct trace_entry *ent, *next = NULL;
	unsigned long lost_events = 0, next_lost = 0;
	int cpu_file = iter->cpu_file;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int next_size = 0;
	int cpu;

	/*
	 * If we are in a per_cpu trace file, don't bother iterating
	 * over all cpus; peek directly.
	 */
	if (cpu_file > RING_BUFFER_ALL_CPUS) {
		if (ring_buffer_empty_cpu(buffer, cpu_file))
			return NULL;
		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
		if (ent_cpu)
			*ent_cpu = cpu_file;

		return ent;
	}

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts, &lost_events);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
			next_lost = lost_events;
			next_size = iter->ent_size;
		}
	}

	iter->ent_size = next_size;

	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	if (missing_events)
		*missing_events = next_lost;

	return next;
}

/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts)
{
	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
}

/* Find the next real entry, and increment the iterator to the next entry */
void *trace_find_next_entry_inc(struct trace_iterator *iter)
{
	iter->ent = __find_next_entry(iter, &iter->cpu,
				      &iter->lost_events, &iter->ts);

	if (iter->ent)
		trace_iterator_increment(iter);

	return iter->ent ? iter : NULL;
}

static void trace_consume(struct trace_iterator *iter)
{
	ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
			    &iter->lost_events);
}

static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int i = (int)*pos;
	void *ent;

	WARN_ON_ONCE(iter->leftover);

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = trace_find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = trace_find_next_entry_inc(iter);

	iter->pos = *pos;

	return ent;
}

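/*
 * Reset the ring buffer iterator for @cpu, skipping (and counting)
 * any entries stamped before the buffer's time_start.
 */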
void tracing_iter_reset(struct trace_iterator *iter, int cpu)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter;
	unsigned long entries = 0;
	u64 ts;

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;

	buf_iter = trace_buffer_iter(iter, cpu);
	if (!buf_iter)
		return;

	ring_buffer_iter_reset(buf_iter);

	/*
	 * We could have the case with the max latency tracers
	 * that a reset never took place on a cpu. This is evident
	 * by the timestamp being before the start of the buffer.
	 */
	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
		if (ts >= iter->trace_buffer->time_start)
			break;
		entries++;
		ring_buffer_read(buf_iter, NULL);
	}

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
}

/*
 * The current tracer is copied to avoid global locking
 * all around.
 */
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	int cpu_file = iter->cpu_file;
	void *p = NULL;
	loff_t l = 0;
	int cpu;

	/*
	 * copy the tracer to avoid using a global lock all around.
	 * iter->trace is a copy of current_trace, the pointer to the
	 * name may be used instead of a strcmp(), as iter->trace->name
	 * will point to the same string as current_trace->name.
	 */
	mutex_lock(&trace_types_lock);
	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return ERR_PTR(-EBUSY);
#endif

	if (!iter->snapshot)
		atomic_inc(&trace_record_cmdline_disabled);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		if (cpu_file == RING_BUFFER_ALL_CPUS) {
			for_each_tracing_cpu(cpu)
				tracing_iter_reset(iter, cpu);
		} else
			tracing_iter_reset(iter, cpu_file);

		iter->leftover = 0;
		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		/*
		 * If we overflowed the seq_file before, then we want
		 * to just reuse the trace_seq buffer again.
		 */
		if (iter->leftover)
			p = iter;
		else {
			l = *pos - 1;
			p = s_next(m, p, &l);
		}
	}

	trace_event_read_lock();
	trace_access_lock(cpu_file);
	return p;
}

static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return;
#endif

	if (!iter->snapshot)
		atomic_dec(&trace_record_cmdline_disabled);

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
}

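/*
 * Count the entries still held in @buf (*entries) and the total
 * number written, including any that were overwritten (*total).
 */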
static void
get_total_entries(struct trace_buffer *buf,
		  unsigned long *total, unsigned long *entries)
{
	unsigned long count;
	int cpu;

	*total = 0;
	*entries = 0;

	for_each_tracing_cpu(cpu) {
		count = ring_buffer_entries_cpu(buf->buffer, cpu);
		/*
		 * If this buffer has skipped entries, then we hold all
		 * entries for the trace and we need to ignore the
		 * ones before the time stamp.
		 */
		if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
			count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
			/* total is the same as the entries */
			*total += count;
		} else
			*total += count +
				ring_buffer_overrun_cpu(buf->buffer, cpu);
		*entries += count;
	}
}

static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                  _------=> CPU#            \n");
	seq_puts(m, "#                 / _-----=> irqs-off        \n");
	seq_puts(m, "#                | / _----=> need-resched    \n");
	seq_puts(m, "#                || / _---=> hardirq/softirq \n");
	seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
	seq_puts(m, "#                |||| /     delay             \n");
	seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
	seq_puts(m, "#     \\   /      |||||  \\    |   /           \n");
}

static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
{
	unsigned long total;
	unsigned long entries;

	get_total_entries(buf, &total, &entries);
	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
		   entries, total, num_online_cpus());
	seq_puts(m, "#\n");
}

static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |       |          |         |\n");
}

static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#                              _-----=> irqs-off\n");
	seq_puts(m, "#                             / _----=> need-resched\n");
	seq_puts(m, "#                            | / _---=> hardirq/softirq\n");
	seq_puts(m, "#                            || / _--=> preempt-depth\n");
	seq_puts(m, "#                            ||| /     delay\n");
	seq_puts(m, "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |       |   ||||       |         |\n");
}

void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_buffer *buf = iter->trace_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
	struct tracer *type = iter->trace;
	unsigned long entries;
	unsigned long total;
	const char *name = "preemption";

	name = type->name;

	get_total_entries(buf, &total, &entries);

	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "# -----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   buf->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "#    -----------------\n");
	seq_printf(m, "#    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid,
		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "#    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, "#  => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#  => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#\n");
	}

	seq_puts(m, "#\n");
}

static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;

	if (!(trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	if (cpumask_test_cpu(iter->cpu, iter->started))
		return;

	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
		return;

	cpumask_set_cpu(iter->cpu, iter->started);

	/* Don't print started cpu buffer for the first entry of the trace */
	if (iter->idx > 1)
		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
				iter->cpu);
}

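/* Default human-readable output: context fields, then the event's own formatter. */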
static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	test_cpu_buff_start(iter);

	event = ftrace_find_event(entry->type);

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
			if (!trace_print_lat_context(iter))
				goto partial;
		} else {
			if (!trace_print_context(iter))
				goto partial;
		}
	}

	if (event)
		return event->funcs->trace(iter, sym_flags, event);

	if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (!trace_seq_printf(s, "%d %d %llu ",
				      entry->pid, iter->cpu, iter->ts))
			goto partial;
	}

	event = ftrace_find_event(entry->type);
	if (event)
		return event->funcs->raw(iter, 0, event);

	if (!trace_seq_printf(s, "%d ?\n", entry->type))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
		SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
		SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
	}

	event = ftrace_find_event(entry->type);
	if (event) {
		enum print_line_t ret = event->funcs->hex(iter, 0, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	SEQ_PUT_FIELD_RET(s, newline);

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_FIELD_RET(s, entry->pid);
		SEQ_PUT_FIELD_RET(s, iter->cpu);
		SEQ_PUT_FIELD_RET(s, iter->ts);
	}

	event = ftrace_find_event(entry->type);
	return event ? event->funcs->binary(iter, 0, event) :
		TRACE_TYPE_HANDLED;
}

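/* Return 1 if every buffer covered by this iterator has been fully read. */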
int trace_empty(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter;
	int cpu;

	/* If we are looking at one CPU buffer, only check that one */
	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
		cpu = iter->cpu_file;
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
		return 1;
	}

	for_each_tracing_cpu(cpu) {
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
	}

	return 1;
}

/*  Called with trace_event_read_lock() held. */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	enum print_line_t ret;

	if (iter->lost_events &&
	    !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
				 iter->cpu, iter->lost_events))
		return TRACE_TYPE_PARTIAL_LINE;

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}

void trace_latency_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;

	/* print nothing if the buffers are empty */
	if (trace_empty(iter))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
		print_trace_header(m, iter);

	if (!(trace_flags & TRACE_ITER_VERBOSE))
		print_lat_help_header(m);
}

void trace_default_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;
		print_trace_header(m, iter);
		if (!(trace_flags & TRACE_ITER_VERBOSE))
			print_lat_help_header(m);
	} else {
		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
			if (trace_flags & TRACE_ITER_IRQ_INFO)
				print_func_help_header_irq(iter->trace_buffer, m);
			else
				print_func_help_header(iter->trace_buffer, m);
		}
	}
}

static void test_ftrace_alive(struct seq_file *m)
{
	if (!ftrace_is_dead())
		return;
	seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
	seq_printf(m, "#          MAY BE MISSING FUNCTION EVENTS\n");
}

#ifdef CONFIG_TRACER_MAX_TRACE
static void show_snapshot_main_help(struct seq_file *m)
{
	seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
	seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
	seq_printf(m, "#                      Takes a snapshot of the main buffer.\n");
	seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate)\n");
	seq_printf(m, "#                      (Doesn't have to be '2' works with any number that\n");
	seq_printf(m, "#                       is not a '0' or '1')\n");
}

static void show_snapshot_percpu_help(struct seq_file *m)
{
	seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
	seq_printf(m, "#                      Takes a snapshot of the main buffer for this cpu.\n");
#else
	seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
	seq_printf(m, "#                     Must use main snapshot file to allocate.\n");
#endif
	seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
	seq_printf(m, "#                      (Doesn't have to be '2' works with any number that\n");
	seq_printf(m, "#                       is not a '0' or '1')\n");
}

static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
{
	if (iter->trace->allocated_snapshot)
		seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
	else
		seq_printf(m, "#\n# * Snapshot is freed *\n#\n");

	seq_printf(m, "# Snapshot commands:\n");
	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
		show_snapshot_main_help(m);
	else
		show_snapshot_percpu_help(m);
}
#else
/* Should never be called */
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
#endif

static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
			test_ftrace_alive(m);
		}
		if (iter->snapshot && trace_empty(iter))
			print_snapshot_help(m, iter);
		else if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);

	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * Use that instead.
		 *  ret is 0 if seq_file write succeeded.
		 *        -1 otherwise.
		 */
		iter->leftover = ret;
	}

	return 0;
}

static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};

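/*
 * Set up a trace iterator for reading: copy the current tracer, attach
 * ring buffer iterators for the requested cpus and, unless we are
 * opening the snapshot, stop tracing until the file is released.
 */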
static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
	struct trace_cpu *tc = inode->i_private;
	struct trace_array *tr = tc->tr;
	struct trace_iterator *iter;
	int cpu;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
	if (!iter)
		return ERR_PTR(-ENOMEM);

	iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
				    GFP_KERNEL);
	if (!iter->buffer_iter)
		goto release;

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	*iter->trace = *tr->current_trace;

	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	iter->tr = tr;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Currently only the top directory has a snapshot */
	if (tr->current_trace->print_max || snapshot)
		iter->trace_buffer = &tr->max_buffer;
	else
#endif
		iter->trace_buffer = &tr->trace_buffer;
	iter->snapshot = snapshot;
	iter->pos = -1;
	mutex_init(&iter->mutex);
	iter->cpu_file = tc->cpu;

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[trace_clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* stop the trace while dumping if we are not opening "snapshot" */
	if (!iter->snapshot)
		tracing_stop_tr(tr);

	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter->buffer_iter[cpu] =
				ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		}
		ring_buffer_read_prepare_sync();
		for_each_tracing_cpu(cpu) {
			ring_buffer_read_start(iter->buffer_iter[cpu]);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		ring_buffer_read_prepare_sync();
		ring_buffer_read_start(iter->buffer_iter[cpu]);
		tracing_iter_reset(iter, cpu);
	}

	mutex_unlock(&trace_types_lock);

	return iter;

 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
release:
	seq_release_private(inode, file);
	return ERR_PTR(-ENOMEM);
}

int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}

static int tracing_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
	struct trace_array *tr;
	int cpu;

	if (!(file->f_mode & FMODE_READ))
		return 0;

	iter = m->private;
	tr = iter->tr;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	if (!iter->snapshot)
		/* reenable tracing if it was previously enabled */
		tracing_start_tr(tr);
	mutex_unlock(&trace_types_lock);

	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
	seq_release_private(inode, file);
	return 0;
}

static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter;
	int ret = 0;

	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC)) {
		struct trace_cpu *tc = inode->i_private;
		struct trace_array *tr = tc->tr;

		if (tc->cpu == RING_BUFFER_ALL_CPUS)
			tracing_reset_online_cpus(&tr->trace_buffer);
		else
			tracing_reset(&tr->trace_buffer, tc->cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, false);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}
	return ret;
}

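/*
 * Iteration over the list of registered tracers, backing the
 * available_tracers file.
 */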
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct tracer *t = v;

	(*pos)++;

	if (t)
		t = t->next;

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct tracer *t;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);
	for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_printf(m, "%s", t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static const struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int show_traces_open(struct inode *inode, struct file *file)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(file, &show_traces_seq_ops);
}

static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}

static loff_t tracing_seek(struct file *file, loff_t offset, int origin)
{
	if (file->f_mode & FMODE_READ)
		return seq_lseek(file, offset, origin);
	else
		return 0;
}

static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= tracing_seek,
	.release	= tracing_release,
};

static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
	.llseek		= seq_lseek,
};

/*
 * Only trace on a CPU if the bitmask is set:
 */
static cpumask_var_t tracing_cpumask;

/*
 * The tracer itself will not take this lock, but still we want
 * to provide a consistent cpumask to user-space:
 */
static DEFINE_MUTEX(tracing_cpumask_update_lock);

/*
 * Temporary storage for the character representation of the
 * CPU bitmask (and one more byte for the newline):
 */
static char mask_str[NR_CPUS + 1];

static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	int len;

	mutex_lock(&tracing_cpumask_update_lock);

	len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
	if (count - len < 2) {
		count = -EINVAL;
		goto out_err;
	}
	len += sprintf(mask_str + len, "\n");
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);

out_err:
	mutex_unlock(&tracing_cpumask_update_lock);

	return count;
}

static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	cpumask_var_t tracing_cpumask_new;
	int err, cpu;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	mutex_lock(&tracing_cpumask_update_lock);

	local_irq_disable();
	arch_spin_lock(&ftrace_max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tracing_cpumask) &&
				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&ftrace_max_lock);
	local_irq_enable();

	cpumask_copy(tracing_cpumask, tracing_cpumask_new);

	mutex_unlock(&tracing_cpumask_update_lock);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}

static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
	.llseek		= generic_file_llseek,
};

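/* Show every trace option, one per line, prefixed with "no" when it is off. */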
static int tracing_trace_options_show(struct seq_file *m, void *v)
{
	struct tracer_opt *trace_opts;
	struct trace_array *tr = m->private;
	u32 tracer_flags;
	int i;

	mutex_lock(&trace_types_lock);
	tracer_flags = tr->current_trace->flags->val;
	trace_opts = tr->current_trace->flags->opts;

	for (i = 0; trace_options[i]; i++) {
		if (trace_flags & (1 << i))
			seq_printf(m, "%s\n", trace_options[i]);
		else
			seq_printf(m, "no%s\n", trace_options[i]);
	}

	for (i = 0; trace_opts[i].name; i++) {
		if (tracer_flags & trace_opts[i].bit)
			seq_printf(m, "%s\n", trace_opts[i].name);
		else
			seq_printf(m, "no%s\n", trace_opts[i].name);
	}
	mutex_unlock(&trace_types_lock);

	return 0;
}

static int __set_tracer_option(struct tracer *trace,
			       struct tracer_flags *tracer_flags,
			       struct tracer_opt *opts, int neg)
{
	int ret;

	ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
	if (ret)
		return ret;

	if (neg)
		tracer_flags->val &= ~opts->bit;
	else
		tracer_flags->val |= opts->bit;
	return 0;
}

/* Try to assign a tracer specific option */
static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
{
	struct tracer_flags *tracer_flags = trace->flags;
	struct tracer_opt *opts = NULL;
	int i;

	for (i = 0; tracer_flags->opts[i].name; i++) {
		opts = &tracer_flags->opts[i];

		if (strcmp(cmp, opts->name) == 0)
			return __set_tracer_option(trace, trace->flags,
						   opts, neg);
	}

	return -EINVAL;
}

/* Some tracers require overwrite to stay enabled */
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
{
	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
		return -1;

	return 0;
}

int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
	/* do nothing if flag is already set */
	if (!!(trace_flags & mask) == !!enabled)
		return 0;

	/* Give the tracer a chance to approve the change */
	if (tr->current_trace->flag_changed)
		if (tr->current_trace->flag_changed(tr->current_trace, mask, !!enabled))
			return -EINVAL;

	if (enabled)
		trace_flags |= mask;
	else
		trace_flags &= ~mask;

	if (mask == TRACE_ITER_RECORD_CMD)
		trace_event_enable_cmd_record(enabled);

	if (mask == TRACE_ITER_OVERWRITE) {
		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
#ifdef CONFIG_TRACER_MAX_TRACE
		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
#endif
	}

	if (mask == TRACE_ITER_PRINTK)
		trace_printk_start_stop_comm(enabled);

	return 0;
}

static int trace_set_options(struct trace_array *tr, char *option)
{
	char *cmp;
	int neg = 0;
	int ret = -ENODEV;
	int i;

	cmp = strstrip(option);

	if (strncmp(cmp, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	mutex_lock(&trace_types_lock);

	for (i = 0; trace_options[i]; i++) {
		if (strcmp(cmp, trace_options[i]) == 0) {
			ret = set_tracer_flag(tr, 1 << i, !neg);
			break;
		}
	}

	/* If no option could be set, test the specific tracer options */
	if (!trace_options[i])
		ret = set_tracer_option(tr->current_trace, cmp, neg);

	mutex_unlock(&trace_types_lock);

	return ret;
}

static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = trace_set_options(tr, buf);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int tracing_trace_options_open(struct inode *inode, struct file *file)
{
	if (tracing_disabled)
		return -ENODEV;

	return single_open(file, tracing_trace_options_show, inode->i_private);
}

static const struct file_operations tracing_iter_fops = {
	.open		= tracing_trace_options_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= tracing_trace_options_write,
};

static const char readme_msg[] =
	"tracing mini-HOWTO:\n\n"
	"# mount -t debugfs nodev /sys/kernel/debug\n\n"
	"# cat /sys/kernel/debug/tracing/available_tracers\n"
	"wakeup wakeup_rt preemptirqsoff preemptoff irqsoff function nop\n\n"
	"# cat /sys/kernel/debug/tracing/current_tracer\n"
	"nop\n"
	"# echo wakeup > /sys/kernel/debug/tracing/current_tracer\n"
	"# cat /sys/kernel/debug/tracing/current_tracer\n"
	"wakeup\n"
	"# cat /sys/kernel/debug/tracing/trace_options\n"
	"noprint-parent nosym-offset nosym-addr noverbose\n"
	"# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
	"# echo 1 > /sys/kernel/debug/tracing/tracing_on\n"
	"# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n"
	"# echo 0 > /sys/kernel/debug/tracing/tracing_on\n"
;

static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
					readme_msg, strlen(readme_msg));
}

static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
	.llseek		= generic_file_llseek,
};

static ssize_t
tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
				size_t cnt, loff_t *ppos)
{
	char *buf_comm;
	char *file_buf;
	char *buf;
	int len = 0;
	int pid;
	int i;

	file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
	if (!file_buf)
		return -ENOMEM;

	buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
	if (!buf_comm) {
		kfree(file_buf);
		return -ENOMEM;
	}

	buf = file_buf;

	for (i = 0; i < SAVED_CMDLINES; i++) {
		int r;

		pid = map_cmdline_to_pid[i];
		if (pid == -1 || pid == NO_CMDLINE_MAP)
			continue;

		trace_find_cmdline(pid, buf_comm);
		r = sprintf(buf, "%d %s\n", pid, buf_comm);
		buf += r;
		len += r;
	}

	len = simple_read_from_buffer(ubuf, cnt, ppos,
				      file_buf, len);

	kfree(file_buf);
	kfree(buf_comm);

	return len;
}

static const struct file_operations tracing_saved_cmdlines_fops = {
    .open       = tracing_open_generic,
    .read       = tracing_saved_cmdlines_read,
    .llseek	= generic_file_llseek,
};

static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+2];
	int r;

	mutex_lock(&trace_types_lock);
	r = sprintf(buf, "%s\n", tr->current_trace->name);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

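/* Reset the online cpu buffers and run the tracer's init callback. */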
int tracer_init(struct tracer *t, struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
	return t->init(tr);
}

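/* Record the per-cpu ring buffer size in each cpu's trace data. */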
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
{
	int cpu;
	for_each_tracing_cpu(cpu)
		per_cpu_ptr(buf->data, cpu)->entries = val;
}

#ifdef CONFIG_TRACER_MAX_TRACE
/* resize @trace_buf's buffer to the size of @size_buf's entries */
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id)
{
	int cpu, ret = 0;

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
			if (ret < 0)
				break;
			per_cpu_ptr(trace_buf->data, cpu)->entries =
				per_cpu_ptr(size_buf->data, cpu)->entries;
		}
	} else {
		ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
		if (ret == 0)
			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
				per_cpu_ptr(size_buf->data, cpu_id)->entries;
	}

	return ret;
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int __tracing_resize_ring_buffer(struct trace_array *tr,
					unsigned long size, int cpu)
{
	int ret;

	/*
	 * If kernel or user changes the size of the ring buffer
	 * we use the size that was given, and we can forget about
	 * expanding it later.
	 */
	ring_buffer_expanded = 1;

	/* May be called before buffers are initialized */
	if (!tr->trace_buffer.buffer)
		return 0;

	ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
	if (ret < 0)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
	    !tr->current_trace->use_max_tr)
		goto out;

	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
	if (ret < 0) {
		int r = resize_buffer_duplicate_size(&tr->trace_buffer,
						     &tr->trace_buffer, cpu);
		if (r < 0) {
			/*
			 * AARGH! We are left with different
			 * size max buffer!!!!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snapshot. We succeeded in
			 * updating the size of the main buffer, but failed
			 * to update the size of the max buffer. But when we
			 * tried to reset the main buffer to the original size,
			 * we failed there too. This is very unlikely to
			 * happen, but if it does, warn and kill all
			 * tracing.
			 */
			WARN_ON(1);
			tracing_disabled = 1;
		}
		return ret;
	}

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->max_buffer, size);
	else
		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;

 out:
#endif /* CONFIG_TRACER_MAX_TRACE */

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->trace_buffer, size);
	else
		per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;

	return ret;
}

static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
					  unsigned long size, int cpu_id)
{
	int ret = size;

	mutex_lock(&trace_types_lock);

	if (cpu_id != RING_BUFFER_ALL_CPUS) {
		/* make sure this cpu is enabled in the mask */
		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
			ret = -EINVAL;
			goto out;
		}
	}

	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
	if (ret < 0)
		ret = -ENOMEM;

out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save memory on a system where tracing is configured in but never
 * used, the ring buffers are set to a minimum size. Once a user starts
 * to use the tracing facility, they need to grow to their default size.
 *
 * This function is to be called when a tracer is about to be used.
 */
int tracing_update_buffers(void)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
	mutex_unlock(&trace_types_lock);

	return ret;
}

struct trace_option_dentry;

static struct trace_option_dentry *
create_trace_option_files(struct trace_array *tr, struct tracer *tracer);

static void
destroy_trace_option_files(struct trace_option_dentry *topts);

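/*
 * Switch to the tracer named @buf: shut down the old tracer, resize or
 * allocate the max/snapshot buffer as the new tracer requires, then
 * initialize and enable the new one.
 */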
static int tracing_set_tracer(const char *buf)
{
	static struct trace_option_dentry *topts;
	struct trace_array *tr = &global_trace;
	struct tracer *t;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool had_max_tr;
#endif
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (!ring_buffer_expanded) {
		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			goto out;
		ret = 0;
	}

	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (t == tr->current_trace)
		goto out;

	trace_branch_disable();

	tr->current_trace->enabled = false;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

#ifdef CONFIG_TRACER_MAX_TRACE
	had_max_tr = tr->current_trace->allocated_snapshot;

	/* Current trace needs to be nop_trace before synchronize_sched */
	tr->current_trace = &nop_trace;

	if (had_max_tr && !t->use_max_tr) {
		/*
		 * We need to make sure that the update_max_tr sees that
		 * current_trace changed to nop_trace to keep it from
		 * swapping the buffers after we resize it.
		 * The update_max_tr is called from interrupts disabled
		 * so a synchronize_sched() is sufficient.
		 */
		synchronize_sched();
		/*
		 * We don't free the ring buffer; instead, resize it because
		 * the max_tr ring buffer has some state (e.g. ring->clock) and
		 * we want to preserve it.
		 */
		ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
		set_buffer_entries(&tr->max_buffer, 1);
		tracing_reset_online_cpus(&tr->max_buffer);
		tr->current_trace->allocated_snapshot = false;
	}
#else
	tr->current_trace = &nop_trace;
#endif
	destroy_trace_option_files(topts);

	topts = create_trace_option_files(tr, t);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (t->use_max_tr && !had_max_tr) {
		/* we need to make per cpu buffer sizes equivalent */
		ret = resize_buffer_duplicate_size(&tr->max_buffer, &tr->trace_buffer,
						   RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			goto out;
		t->allocated_snapshot = true;
	}
#endif

	if (t->init) {
		ret = tracer_init(t, tr);
		if (ret)
			goto out;
	}

	tr->current_trace = t;
	tr->current_trace->enabled = true;
	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	char buf[MAX_TRACER_SIZE+1];
	int i;
	size_t ret;
	int err;

	ret = cnt;

	if (cnt > MAX_TRACER_SIZE)
		cnt = MAX_TRACER_SIZE;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip ending whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	err = tracing_set_tracer(buf);
	if (err)
		return err;

	*ppos += ret;

	return ret;
}

static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	*ptr = val * 1000;

	return cnt;
}

static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
	struct trace_cpu *tc = inode->i_private;
	struct trace_array *tr = tc->tr;
	struct trace_iterator *iter;
	int ret = 0;

	if (tracing_disabled)
		return -ENODEV;

	mutex_lock(&trace_types_lock);

	/* create a buffer to store the information to pass to userspace */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace) {
		ret = -ENOMEM;
		goto fail;
	}
	*iter->trace = *tr->current_trace;

	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto fail;
	}

	/* trace pipe does not show start of buffer */
	cpumask_setall(iter->started);

	if (trace_flags & TRACE_ITER_LATENCY_FMT)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[trace_clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	iter->cpu_file = tc->cpu;
	iter->tr = tc->tr;
	iter->trace_buffer = &tc->tr->trace_buffer;
	mutex_init(&iter->mutex);
	filp->private_data = iter;

	if (iter->trace->pipe_open)
		iter->trace->pipe_open(iter);

	nonseekable_open(inode, filp);
out:
	mutex_unlock(&trace_types_lock);
	return ret;

fail:
	kfree(iter->trace);
	kfree(iter);
	mutex_unlock(&trace_types_lock);
	return ret;
}

static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;

	mutex_lock(&trace_types_lock);

	if (iter->trace->pipe_close)
		iter->trace->pipe_close(iter);

	mutex_unlock(&trace_types_lock);

	free_cpumask_var(iter->started);
	mutex_destroy(&iter->mutex);
	kfree(iter->trace);
	kfree(iter);

	return 0;
}

static unsigned int
trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return POLLIN | POLLRDNORM;

	if (trace_flags & TRACE_ITER_BLOCK)
		/*
		 * Always select as readable when in blocking mode
		 */
		return POLLIN | POLLRDNORM;
	else
		return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
					     filp, poll_table);
}

static unsigned int
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
	struct trace_iterator *iter = filp->private_data;

	return trace_poll(iter, filp, poll_table);
}

/*
 * This is a make-shift waitqueue.
 * A tracer might use this callback in some rare cases:
 *
 *  1) the current tracer might hold the runqueue lock when it wakes up
 *     a reader, hence a deadlock (sched, function, and function graph tracers)
 *  2) the function tracers trace all functions; we don't want
 *     the overhead of calling wake_up and friends
 *     (and tracing them too)
 *
 * Anyway, this is a very primitive wakeup.
 */
void poll_wait_pipe(struct trace_iterator *iter)
{
	set_current_state(TASK_INTERRUPTIBLE);
	/* sleep for 100 msecs, and try again. */
	schedule_timeout(HZ / 10);
}

/* Must be called with trace_types_lock mutex held. */
static int tracing_wait_pipe(struct file *filp)
{
	struct trace_iterator *iter = filp->private_data;

	while (trace_empty(iter)) {

		if ((filp->f_flags & O_NONBLOCK)) {
			return -EAGAIN;
		}

		mutex_unlock(&iter->mutex);

		iter->trace->wait_pipe(iter);

		mutex_lock(&iter->mutex);

		if (signal_pending(current))
			return -EINTR;

		/*
		 * We block until we read something and tracing is disabled.
		 * We still block if tracing is disabled, but we have never
		 * read anything. This allows a user to cat this file, and
		 * then enable tracing. But after we have read something,
		 * we give an EOF when tracing is again disabled.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracing_is_enabled() && iter->pos)
			break;
	}

	return 1;
}

/*
 * Consumer reader.
 */
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_iterator *iter = filp->private_data;
	struct trace_array *tr = iter->tr;
	ssize_t sret;

	/* return any leftover data */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (sret != -EBUSY)
		return sret;

	trace_seq_init(&iter->seq);

	/* copy the tracer to avoid using a global lock all around */
	mutex_lock(&trace_types_lock);
	if (unlikely(iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

	/*
	 * Avoid more than one consumer on a single file descriptor
	 * This is just a matter of traces coherency, the ring buffer itself
	 * is protected.
	 */
	mutex_lock(&iter->mutex);
	if (iter->trace->read) {
		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
		if (sret)
			goto out;
	}

waitagain:
	sret = tracing_wait_pipe(filp);
	if (sret <= 0)
		goto out;

	/* stop when tracing is finished */
	if (trace_empty(iter)) {
		sret = 0;
		goto out;
	}

	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	/* reset all but tr, trace, and overruns */
	memset(&iter->seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
	iter->pos = -1;

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);
	while (trace_find_next_entry_inc(iter) != NULL) {
		enum print_line_t ret;
		int len = iter->seq.len;

		ret = print_trace_line(iter);
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			/* don't print partial lines */
			iter->seq.len = len;
			break;
		}
		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);

		if (iter->seq.len >= cnt)
			break;

		/*
		 * Setting the full flag means we reached the trace_seq buffer
		 * size and we should leave by partial output condition above.
		 * One of the trace_seq_* functions is not used properly.
		 */
		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
			  iter->ent->type);
	}
	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();

	/* Now copy what we have to the user */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (iter->seq.readpos >= iter->seq.len)
		trace_seq_init(&iter->seq);

	/*
	 * If there was nothing to send to user, in spite of consuming trace
	 * entries, go back to wait for more entries.
	 */
	if (sret == -EBUSY)
		goto waitagain;

out:
	mutex_unlock(&iter->mutex);

	return sret;
}

static void tracing_pipe_buf_release(struct pipe_inode_info *pipe,
				     struct pipe_buffer *buf)
{
	__free_page(buf->page);
}

static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
				     unsigned int idx)
{
	__free_page(spd->pages[idx]);
}

3769
static const struct pipe_buf_operations tracing_pipe_buf_ops = {
S
	.map			= generic_pipe_buf_map,
	.unmap			= generic_pipe_buf_unmap,
	.confirm		= generic_pipe_buf_confirm,
	.release		= tracing_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= generic_pipe_buf_get,
3777 3778
};

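/*
 * Fill up to one page worth of formatted trace output for splice,
 * consuming entries as they are printed.
 */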
static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
	size_t count;
	int ret;

	/* Seq buffer is page-sized, exactly what we need. */
	for (;;) {
		count = iter->seq.len;
		ret = print_trace_line(iter);
		count = iter->seq.len - count;
		if (rem < count) {
			rem = 0;
			iter->seq.len -= count;
			break;
		}
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			iter->seq.len -= count;
			break;
		}

		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);
		rem -= count;
		if (!trace_find_next_entry_inc(iter)) {
			rem = 0;
			iter->ent = NULL;
			break;
		}
	}

	return rem;
}

3813 3814 3815 3816 3817 3818
static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages	= 0, /* This gets updated below. */
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &tracing_pipe_buf_ops,
		.spd_release	= tracing_spd_release_pipe,
	};
	struct trace_array *tr = iter->tr;
	ssize_t ret;
	size_t rem;
	unsigned int i;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	/* copy the tracer to avoid using a global lock all around */
	mutex_lock(&trace_types_lock);
	if (unlikely(iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

	mutex_lock(&iter->mutex);

	if (iter->trace->splice_read) {
		ret = iter->trace->splice_read(iter, filp,
					       ppos, pipe, len, flags);
		if (ret)
			goto out_err;
	}

	ret = tracing_wait_pipe(filp);
	if (ret <= 0)
		goto out_err;

	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
		ret = -EFAULT;
		goto out_err;
	}

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);

	/* Fill as many pages as possible. */
	for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
		spd.pages[i] = alloc_page(GFP_KERNEL);
		if (!spd.pages[i])
			break;

		rem = tracing_fill_pipe_page(rem, iter);

		/* Copy the data into the page, so we can start over. */
		ret = trace_seq_to_buffer(&iter->seq,
					  page_address(spd.pages[i]),
					  iter->seq.len);
		if (ret < 0) {
			__free_page(spd.pages[i]);
			break;
		}
		spd.partial[i].offset = 0;
		spd.partial[i].len = iter->seq.len;

		trace_seq_init(&iter->seq);
	}

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
	mutex_unlock(&iter->mutex);

	spd.nr_pages = i;

	ret = splice_to_pipe(pipe, &spd);
out:
	splice_shrink_spd(&spd);
	return ret;

out_err:
	mutex_unlock(&iter->mutex);
	goto out;
}

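/*
 * Read handler for buffer_size_kb: report the ring buffer size in KB.
 * The all-CPUs file prints a single value when every per-cpu buffer is
 * the same size, and "X" when the sizes differ.
 */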
static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_cpu *tc = filp->private_data;
	struct trace_array *tr = tc->tr;
	char buf[64];
	int r = 0;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

	if (tc->cpu == RING_BUFFER_ALL_CPUS) {
		int cpu, buf_size_same;
		unsigned long size;

		size = 0;
		buf_size_same = 1;
		/* check if all cpu sizes are same */
		for_each_tracing_cpu(cpu) {
			/* fill in the size from first enabled cpu */
			if (size == 0)
				size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
			if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
				buf_size_same = 0;
				break;
			}
		}

		if (buf_size_same) {
			if (!ring_buffer_expanded)
				r = sprintf(buf, "%lu (expanded: %lu)\n",
					    size >> 10,
					    trace_buf_size >> 10);
			else
				r = sprintf(buf, "%lu\n", size >> 10);
		} else
			r = sprintf(buf, "X\n");
	} else
		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, tc->cpu)->entries >> 10);

	mutex_unlock(&trace_types_lock);

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
	return ret;
}

static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	struct trace_cpu *tc = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry */
	if (!val)
		return -EINVAL;

	/* value is in KB */
	val <<= 10;

	ret = tracing_resize_ring_buffer(tc->tr, val, tc->cpu);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static ssize_t
tracing_total_entries_read(struct file *filp, char __user *ubuf,
				size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r, cpu;
	unsigned long size = 0, expanded_size = 0;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
		if (!ring_buffer_expanded)
			expanded_size += trace_buf_size >> 10;
	}
	if (ring_buffer_expanded)
		r = sprintf(buf, "%lu\n", size);
	else
		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	/*
	 * There is no need to read what the user has written; this function
	 * exists just to make sure that there is no error when "echo" is used.
	 */

	*ppos += cnt;

	return cnt;
}

static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	/* disable tracing ? */
	if (trace_flags & TRACE_ITER_STOP_ON_FREE)
		tracing_off();
	/* resize the ring buffer to 0 */
	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);

	return 0;
}

static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
					size_t cnt, loff_t *fpos)
{
	unsigned long addr = (unsigned long)ubuf;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	struct page *pages[2];
	void *map_page[2];
	int nr_pages = 1;
	ssize_t written;
	int offset;
	int size;
	int len;
	int ret;
	int i;

	if (tracing_disabled)
		return -EINVAL;

	if (!(trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	/*
	 * Userspace is injecting traces into the kernel trace buffer.
	 * We want to be as non-intrusive as possible.
	 * To do so, we do not want to allocate any special buffers
	 * or take any locks, but instead write the userspace data
	 * straight into the ring buffer.
	 *
	 * First we need to pin the userspace buffer into memory.
	 * It most likely already is, because the caller just referenced it,
	 * but there is no guarantee that it is. By using get_user_pages_fast()
	 * and kmap_atomic/kunmap_atomic() we can get access to the
	 * pages directly. We then write the data directly into the
	 * ring buffer.
	 */
	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	/* check if we cross pages */
	if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
		nr_pages = 2;

	offset = addr & (PAGE_SIZE - 1);
	addr &= PAGE_MASK;

	ret = get_user_pages_fast(addr, nr_pages, 0, pages);
	if (ret < nr_pages) {
		while (--ret >= 0)
			put_page(pages[ret]);
		written = -EFAULT;
		goto out;
	}

	for (i = 0; i < nr_pages; i++)
		map_page[i] = kmap_atomic(pages[i]);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt + 2; /* possible \n added */
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  irq_flags, preempt_count());
	if (!event) {
		/* Ring buffer disabled, return as if not open for write */
		written = -EBADF;
		goto out_unlock;
	}

	entry = ring_buffer_event_data(event);
	entry->ip = _THIS_IP_;

	if (nr_pages == 2) {
		len = PAGE_SIZE - offset;
		memcpy(&entry->buf, map_page[0] + offset, len);
		memcpy(&entry->buf[len], map_page[1], cnt - len);
	} else
		memcpy(&entry->buf, map_page[0] + offset, cnt);

	if (entry->buf[cnt - 1] != '\n') {
		entry->buf[cnt] = '\n';
		entry->buf[cnt + 1] = '\0';
	} else
		entry->buf[cnt] = '\0';

	__buffer_unlock_commit(buffer, event);

	written = cnt;

	*fpos += written;

 out_unlock:
	for (i = 0; i < nr_pages; i++) {
		kunmap_atomic(map_page[i]);
		put_page(pages[i]);
	}
 out:
	return written;
}

static int tracing_clock_show(struct seq_file *m, void *v)
{
	struct trace_array *tr = m->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
		seq_printf(m,
			"%s%s%s%s", i ? " " : "",
			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
			i == tr->clock_id ? "]" : "");
	seq_putc(m, '\n');

	return 0;
}

static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *fpos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	const char *clockstr;
	int i;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	clockstr = strstrip(buf);

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
			break;
	}
	if (i == ARRAY_SIZE(trace_clocks))
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tr->clock_id = i;

	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);

	/*
	 * New clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
	tracing_reset_online_cpus(&global_trace.trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
	tracing_reset_online_cpus(&global_trace.max_buffer);
#endif

	mutex_unlock(&trace_types_lock);

	*fpos += cnt;

	return cnt;
}

static int tracing_clock_open(struct inode *inode, struct file *file)
{
	if (tracing_disabled)
		return -ENODEV;

	return single_open(file, tracing_clock_show, inode->i_private);
}

#ifdef CONFIG_TRACER_SNAPSHOT
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_cpu *tc = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret = 0;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			return -ENOMEM;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			return -ENOMEM;
		}
		iter->tr = tc->tr;
		iter->trace_buffer = &tc->tr->max_buffer;
		iter->cpu_file = tc->cpu;
		m->private = iter;
		file->private_data = m;
	}

	return ret;
}

static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

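	/*
	 * val == 0 frees the spare snapshot buffer, val == 1 allocates it
	 * (if necessary) and swaps it with the live buffer, and any other
	 * value just clears the snapshot contents.
	 */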
	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->current_trace->allocated_snapshot) {
			/* free spare buffer */
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
			set_buffer_entries(&tr->max_buffer, 1);
			tracing_reset_online_cpus(&tr->max_buffer);
			tr->current_trace->allocated_snapshot = false;
		}
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (!tr->current_trace->allocated_snapshot) {
			/* allocate spare buffer */
			ret = resize_buffer_duplicate_size(&tr->max_buffer,
					&tr->trace_buffer, RING_BUFFER_ALL_CPUS);
			if (ret < 0)
				break;
			tr->current_trace->allocated_snapshot = true;
		}
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(&global_trace, current, smp_processor_id());
		else
			update_max_tr_single(&global_trace, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		if (tr->current_trace->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}

static int tracing_snapshot_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;

	if (file->f_mode & FMODE_READ)
		return tracing_release(inode, file);

	/* If write only, the seq_file is just a stub */
	if (m)
		kfree(m->private);
	kfree(m);

	return 0;
}

#endif /* CONFIG_TRACER_SNAPSHOT */


static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_free_buffer_fops = {
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= tracing_clock_write,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_seek,
	.release	= tracing_snapshot_release,
};
#endif /* CONFIG_TRACER_SNAPSHOT */

struct ftrace_buffer_info {
	struct trace_iterator	iter;
	void			*spare;
	unsigned int		read;
};

static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_cpu *tc = inode->i_private;
	struct trace_array *tr = tc->tr;
	struct ftrace_buffer_info *info;

	if (tracing_disabled)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->iter.tr		= tr;
	info->iter.cpu_file	= tc->cpu;
	info->iter.trace	= tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;
	info->spare		= NULL;
	/* Force reading ring buffer for first read */
	info->read		= (unsigned int)-1;

	filp->private_data = info;

	return nonseekable_open(inode, filp);
}

static unsigned int
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	return trace_poll(iter, filp, poll_table);
}

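/*
 * Read handler for trace_pipe_raw: hand whole ring buffer pages to user
 * space through a "spare" page, skipping the cost of formatting events.
 */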
static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	ssize_t ret;
	size_t size;

	if (!count)
		return 0;

	if (!info->spare)
		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
							  iter->cpu_file);
	if (!info->spare)
		return -ENOMEM;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

 again:
	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
				    &info->spare,
				    count,
				    iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		if (trace_empty(iter)) {
			if ((filp->f_flags & O_NONBLOCK))
				return -EAGAIN;
			iter->trace->wait_pipe(iter);
			if (signal_pending(current))
				return -EINTR;
			goto again;
		}
		return 0;
	}

	info->read = 0;

 read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size)
		return -EFAULT;
	size -= ret;

	*ppos += size;
	info->read += size;

	return size;
}

static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
	kfree(info);

	return 0;
}

struct buffer_ref {
	struct ring_buffer	*buffer;
	void			*page;
	int			ref;
};

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	buf->private = 0;
}

static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ref->ref++;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,
	.map			= generic_pipe_buf_map,
	.unmap			= generic_pipe_buf_unmap,
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we errored out in filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	spd->partial[i].private = 0;
}

static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, size, i;
	size_t ret;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

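	/* Raw page reads and splices only work in whole-page units. */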
	if (*ppos & (PAGE_SIZE - 1)) {
		ret = -EINVAL;
		goto out;
	}

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE) {
			ret = -EINVAL;
			goto out;
		}
		len &= PAGE_MASK;
	}

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref)
			break;

		ref->ref = 1;
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (!ref->page) {
			kfree(ref);
			break;
		}

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->page);
			kfree(ref);
			break;
		}

		/*
		 * Zero out any leftover data; this is going to
		 * user land.
		 */
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
			ret = -EAGAIN;
			goto out;
		}
		iter->trace->wait_pipe(iter);
		if (signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
	splice_shrink_spd(&spd);
out:
	return ret;
}

static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct trace_cpu *tc = filp->private_data;
	struct trace_array *tr = tc->tr;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;
	int cpu = tc->cpu;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[trace_clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
								t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);

	kfree(s);

	return count;
}

static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
};

#ifdef CONFIG_DYNAMIC_FTRACE

int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}

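/*
 * Read handler for dyn_ftrace_total_info: print the patched-function
 * count followed by any architecture-specific dynamic ftrace details.
 */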
static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
#endif

struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
{
	static int once;

	if (tr->dir)
		return tr->dir;

	if (!debugfs_initialized())
		return NULL;

	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		tr->dir = debugfs_create_dir("tracing", NULL);

	if (!tr->dir && !once) {
		once = 1;
		pr_warning("Could not create debugfs directory 'tracing'\n");
		return NULL;
	}

	return tr->dir;
}

struct dentry *tracing_init_dentry(void)
{
	return tracing_init_dentry_tr(&global_trace);
}

static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);

	WARN_ONCE(!tr->percpu_dir,
		  "Could not create debugfs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}

static void
tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
{
	struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_file("trace_pipe", 0444, d_cpu,
			(void *)&data->trace_cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_file("trace", 0644, d_cpu,
			(void *)&data->trace_cpu, &tracing_fops);

	trace_create_file("trace_pipe_raw", 0444, d_cpu,
			(void *)&data->trace_cpu, &tracing_buffers_fops);

	trace_create_file("stats", 0444, d_cpu,
			(void *)&data->trace_cpu, &tracing_stats_fops);

	trace_create_file("buffer_size_kb", 0444, d_cpu,
			(void *)&data->trace_cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_cpu,
			  (void *)&data->trace_cpu, &snapshot_fops);
#endif
}

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};

static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr->current_trace, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}


static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek	= generic_file_llseek,
};

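/*
 * The trace_options_core_* handlers below operate on the global
 * trace_flags bits; the trace_options_* handlers above operate on a
 * single tracer's private flags.
 */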
static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	long index = (long)filp->private_data;
	char *buf;

	if (trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	struct trace_array *tr = &global_trace;
	long index = (long)filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};

struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = debugfs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warning("Could not create debugfs '%s' entry\n", name);

	return ret;
}


static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->options = debugfs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warning("Could not create debugfs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}

static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
				    &trace_options_fops);

}

static struct trace_option_dentry *
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;

	if (!tracer)
		return NULL;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return NULL;

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return NULL;

	for (cnt = 0; opts[cnt].name; cnt++)
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);

	return topts;
}

static void
destroy_trace_option_files(struct trace_option_dentry *topts)
{
	int cnt;

	if (!topts)
		return;

	for (cnt = 0; topts[cnt].opt; cnt++) {
		if (topts[cnt].entry)
			debugfs_remove(topts[cnt].entry);
	}

	kfree(topts);
}

static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options, (void *)index,
				    &trace_options_core_fops);
}

static __init void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++)
		create_trace_option_core_file(tr, trace_options[i], i);
}

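/*
 * Handlers for the "tracing_on" file: report whether the ring buffer is
 * recording, and switch recording (plus the tracer's start/stop
 * callbacks) on or off.
 */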
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	char buf[64];
	int r;

	if (buffer)
		r = ring_buffer_record_is_on(buffer);
	else
		r = 0;

	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (val) {
			ring_buffer_record_on(buffer);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			ring_buffer_record_off(buffer);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.llseek		= default_llseek,
};

struct dentry *trace_instance_dir;

static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);

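/* Allocate, initialize, and register a new named trace_array instance. */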
static int new_instance_create(const char *name)
{
	enum ring_buffer_flags rb_flags;
	struct trace_array *tr;
	int ret;
	int i;

	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	raw_spin_lock_init(&tr->start_lock);

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	tr->trace_buffer.buffer = ring_buffer_alloc(trace_buf_size, rb_flags);
	if (!tr->trace_buffer.buffer)
		goto out_free_tr;

	tr->trace_buffer.data = alloc_percpu(struct trace_array_cpu);
	if (!tr->trace_buffer.data)
		goto out_free_tr;

	for_each_tracing_cpu(i) {
		memset(per_cpu_ptr(tr->trace_buffer.data, i), 0, sizeof(struct trace_array_cpu));
		per_cpu_ptr(tr->trace_buffer.data, i)->trace_cpu.cpu = i;
		per_cpu_ptr(tr->trace_buffer.data, i)->trace_cpu.tr = tr;
	}

	/* Holder for file callbacks */
	tr->trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
	tr->trace_cpu.tr = tr;

	tr->dir = debugfs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret)
		goto out_free_tr;

	init_tracer_debugfs(tr, tr->dir);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	if (tr->trace_buffer.buffer)
		ring_buffer_free(tr->trace_buffer.buffer);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;

}

static int instance_delete(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	list_del(&tr->list);

	event_trace_del_tracer(tr);
	debugfs_remove_recursive(tr->dir);
	free_percpu(tr->trace_buffer.data);
	ring_buffer_free(tr->trace_buffer.buffer);

	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static int instance_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/*
	 * The inode mutex is locked, but debugfs_create_dir() will also
	 * take the mutex. As the instances directory can not be destroyed
	 * or changed in any other way, it is safe to unlock it, and
	 * let the dentry try. If two users try to make the same dir at
	 * the same time, then the new_instance_create() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = new_instance_create(dentry->d_iname);

	mutex_lock(&inode->i_mutex);

	return ret;
}

static int instance_rmdir(struct inode *inode, struct dentry *dentry)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/* The caller did a dget() on dentry */
	mutex_unlock(&dentry->d_inode->i_mutex);

	/*
	 * The inode mutex is locked, but debugfs_remove_recursive() will
	 * also take the mutex. As the instances directory can not be
	 * destroyed or changed in any other way, it is safe to unlock it,
	 * and let the dentry try. If two users try to remove the same dir
	 * at the same time, then instance_delete() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = instance_delete(dentry->d_iname);

	mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock(&dentry->d_inode->i_mutex);

	return ret;
}

static const struct inode_operations instance_dir_inode_operations = {
	.lookup		= simple_lookup,
	.mkdir		= instance_mkdir,
	.rmdir		= instance_rmdir,
};

static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = debugfs_create_dir("instances", d_tracer);
	if (WARN_ON(!trace_instance_dir))
		return;

	/* Hijack the dir inode operations, to allow mkdir */
	trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
}

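/*
 * Create the common set of debugfs files (trace, trace_pipe,
 * buffer_size_kb, trace_marker, ...) for one trace_array.
 */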
static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
{

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			(void *)&tr->trace_cpu, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			(void *)&tr->trace_cpu, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			(void *)&tr->trace_cpu, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0644, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			    tr, &rb_simple_fops);
}

static __init int tracer_init_debugfs(void)
{
	struct dentry *d_tracer;
	int cpu;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();

	init_tracer_debugfs(&global_trace, d_tracer);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			&global_trace, &tracing_cpumask_fops);

	trace_create_file("available_tracers", 0444, d_tracer,
			&global_trace, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			&global_trace, &set_tracer_fops);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			&tracing_max_latency, &tracing_max_lat_fops);
#endif

	trace_create_file("tracing_thresh", 0644, d_tracer,
			&tracing_thresh, &tracing_max_lat_fops);

	trace_create_file("README", 0444, d_tracer,
			NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			NULL, &tracing_saved_cmdlines_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  (void *)&global_trace.trace_cpu, &snapshot_fops);
#endif

	create_trace_instances(d_tracer);

	create_trace_options_dir(&global_trace);

	for_each_tracing_cpu(cpu)
		tracing_init_debugfs_percpu(&global_trace, cpu);

	return 0;
}

static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call  = trace_panic_handler,
	.next           = NULL,
	.priority       = 150   /* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};

/*
 * printk is set to a max of 1024; we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->len >= TRACE_MAX_PRINT)
		s->len = TRACE_MAX_PRINT;

	/* Should be zero terminated, but we are paranoid. */
	s->buffer[s->len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;
}

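/*
 * Dump the ring buffer(s) to the console.  Called from the panic and
 * die notifiers; guarded so that only the first caller ever dumps, and
 * runs with interrupts disabled.
 */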
static void
__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
{
	static arch_spinlock_t ftrace_dump_lock =
		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	unsigned int old_userobj;
	static int dump_ran;
	unsigned long flags;
	int cnt = 0, cpu;

	/* only one dump */
	local_irq_save(flags);
	arch_spin_lock(&ftrace_dump_lock);
	if (dump_ran)
		goto out;

	dump_ran = 1;

	tracing_off();

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	if (disable_tracing)
		ftrace_kill();

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/*
	 * We need to stop all tracing on all CPUs to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all that we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	/* Re-enable tracing if requested */
	if (!disable_tracing) {
		trace_flags |= old_userobj;

		for_each_tracing_cpu(cpu) {
			atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
		}
		tracing_on();
	}

 out:
	arch_spin_unlock(&ftrace_dump_lock);
	local_irq_restore(flags);
}

/* By default: disable tracing after the dump */
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	__ftrace_dump(true, oops_dump_mode);
}
EXPORT_SYMBOL_GPL(ftrace_dump);

__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	enum ring_buffer_flags rb_flags;
	int i;
	int ret = -ENOMEM;


	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* TODO: make the number of buffers hot pluggable with CPUs */
	global_trace.trace_buffer.buffer = ring_buffer_alloc(ring_buf_size, rb_flags);
	if (!global_trace.trace_buffer.buffer) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_cpumask;
	}

	global_trace.trace_buffer.data = alloc_percpu(struct trace_array_cpu);

	if (!global_trace.trace_buffer.data) {
		printk(KERN_ERR "tracer: failed to allocate percpu memory!\n");
		WARN_ON(1);
		goto out_free_cpumask;
	}

	for_each_tracing_cpu(i) {
		memset(per_cpu_ptr(global_trace.trace_buffer.data, i), 0,
		       sizeof(struct trace_array_cpu));
		per_cpu_ptr(global_trace.trace_buffer.data, i)->trace_cpu.cpu = i;
		per_cpu_ptr(global_trace.trace_buffer.data, i)->trace_cpu.tr = &global_trace;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

#ifdef CONFIG_TRACER_MAX_TRACE
	global_trace.max_buffer.data = alloc_percpu(struct trace_array_cpu);
	if (!global_trace.max_buffer.data) {
		printk(KERN_ERR "tracer: failed to allocate percpu memory!\n");
		WARN_ON(1);
		goto out_free_cpumask;
	}
	global_trace.max_buffer.buffer = ring_buffer_alloc(1, rb_flags);
	if (!global_trace.max_buffer.buffer) {
		printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
		WARN_ON(1);
		ring_buffer_free(global_trace.trace_buffer.buffer);
		goto out_free_cpumask;
	}

	for_each_tracing_cpu(i) {
		memset(per_cpu_ptr(global_trace.max_buffer.data, i), 0,
		       sizeof(struct trace_array_cpu));
		per_cpu_ptr(global_trace.max_buffer.data, i)->trace_cpu.cpu = i;
		per_cpu_ptr(global_trace.max_buffer.data, i)->trace_cpu.tr = &global_trace;
	}
#endif

	/* Allocate the first page for all buffers */
	set_buffer_entries(&global_trace.trace_buffer,
			   ring_buffer_size(global_trace.trace_buffer.buffer, 0));
#ifdef CONFIG_TRACER_MAX_TRACE
	set_buffer_entries(&global_trace.max_buffer, 1);
#endif

	trace_init_cmdlines();

	register_tracer(&nop_trace);

	global_trace.current_trace = &nop_trace;

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	/* Holder for file callbacks */
	global_trace.trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
	global_trace.trace_cpu.tr = &global_trace;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	while (trace_boot_options) {
		char *option;

		option = strsep(&trace_boot_options, ",");
		trace_set_options(&global_trace, option);
	}

	return 0;

out_free_cpumask:
	free_percpu(global_trace.trace_buffer.data);
#ifdef CONFIG_TRACER_MAX_TRACE
	free_percpu(global_trace.max_buffer.data);
#endif
	free_cpumask_var(tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The default tracer at boot buffer is an init section.
	 * This function is called in lateinit. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);