#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/hw_breakpoint.h>
#include <linux/trace_seq.h>
#include <linux/trace_events.h>
#include <linux/compiler.h>
#include <linux/trace_seq.h>

#ifdef CONFIG_FTRACE_SYSCALLS
#include <asm/unistd.h>		/* For NR_SYSCALLS	     */
#include <asm/syscall.h>	/* some archs define it here */
#endif

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_BPRINT,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_BLK,
	TRACE_BPUTS,

	__TRACE_LAST_TYPE,
};


#undef __field
#define __field(type, item)		type	item;

#undef __field_struct
#define __field_struct(type, item)	__field(type, item)

#undef __field_desc
#define __field_desc(type, container, item)

#undef __array
#define __array(type, item, size)	type	item[size];

#undef __array_desc
#define __array_desc(type, container, item, size)

#undef __dynamic_array
#define __dynamic_array(type, item)	type	item[];

#undef F_STRUCT
#define F_STRUCT(args...)		args

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)	\
	struct struct_name {						\
		struct trace_entry	ent;				\
		tstruct							\
	}

#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)

#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print,	\
			 filter, regfn) \
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)

#include "trace_entries.h"

/*
 * Syscalls are special and need special handling; this is why
 * they are not included in trace_entries.h.
 */
struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;
	unsigned long		args[];
};

struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;
	long			ret;
};

struct kprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		ip;
};

struct kretprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF		- interrupts were disabled
 *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ		- inside an interrupt handler
 *  SOFTIRQ		- inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
	TRACE_FLAG_NMI			= 0x40,
};

#define TRACE_BUF_SIZE		1024

struct trace_array;

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data: (for example which task started
 * the trace, etc.)
 */
struct trace_array_cpu {
	atomic_t		disabled;
	void			*buffer_page;	/* ring buffer spare */

	unsigned long		entries;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	unsigned long		skipped_entries;
	cycle_t			preempt_timestamp;
	pid_t			pid;
	kuid_t			uid;
	char			comm[TASK_COMM_LEN];

	bool			ignore_pid;
};

struct tracer;
struct trace_option_dentry;

struct trace_buffer {
	struct trace_array		*tr;
	struct ring_buffer		*buffer;
	struct trace_array_cpu __percpu	*data;
	cycle_t				time_start;
	int				cpu;
};

#define TRACE_FLAGS_MAX_SIZE		32

struct trace_options {
	struct tracer			*tracer;
	struct trace_option_dentry	*topts;
};

struct trace_pid_list {
	int				pid_max;
	unsigned long			*pids;
};

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct list_head	list;
	char			*name;
	struct trace_buffer	trace_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * The max_buffer is used to snapshot the trace when a maximum
	 * latency is reached, or when the user initiates a snapshot.
	 * Some tracers will use this to store a maximum trace while
	 * it continues examining live traces.
	 *
	 * The buffers for the max_buffer are set up the same as the trace_buffer.
	 * When a snapshot is taken, the buffer of the max_buffer is swapped
	 * with the buffer of the trace_buffer and the buffers are reset for
	 * the trace_buffer so the tracing can continue.
	 */
	struct trace_buffer	max_buffer;
	bool			allocated_snapshot;
	unsigned long		max_latency;
#endif
	struct trace_pid_list	__rcu *filtered_pids;
	/*
	 * max_lock is used to protect the swapping of buffers
	 * when taking a max snapshot. The buffers themselves are
	 * protected by per_cpu spinlocks. But the action of the swap
	 * needs its own lock.
	 *
	 * This is defined as an arch_spinlock_t in order to help
	 * with performance when lockdep debugging is enabled.
	 *
	 * It is also used in other places outside of update_max_tr, so
	 * it needs to be defined outside of CONFIG_TRACER_MAX_TRACE.
	 */
	arch_spinlock_t		max_lock;
	int			buffer_disabled;
#ifdef CONFIG_FTRACE_SYSCALLS
	int			sys_refcount_enter;
	int			sys_refcount_exit;
	struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
	struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
#endif
	int			stop_count;
	int			clock_id;
	int			nr_topts;
	struct tracer		*current_trace;
	unsigned int		trace_flags;
	unsigned char		trace_flags_index[TRACE_FLAGS_MAX_SIZE];
	unsigned int		flags;
	raw_spinlock_t		start_lock;
	struct dentry		*dir;
	struct dentry		*options;
	struct dentry		*percpu_dir;
	struct dentry		*event_dir;
	struct trace_options	*topts;
	struct list_head	systems;
	struct list_head	events;
	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
	int			ref;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops	*ops;
	/* function tracing enabled */
	int			function_enabled;
#endif
};

enum {
	TRACE_ARRAY_FL_GLOBAL	= (1 << 0)
};

extern struct list_head ftrace_trace_arrays;

extern struct mutex trace_types_lock;

extern int trace_array_get(struct trace_array *tr);
extern void trace_array_put(struct trace_array *tr);

/*
 * The global tracer (top) should be the first trace array added,
 * but we check the flag anyway.
 */
static inline struct trace_array *top_trace_array(void)
{
	struct trace_array *tr;

	if (list_empty(&ftrace_trace_arrays))
		return NULL;

	tr = list_entry(ftrace_trace_arrays.prev,
			typeof(*tr), list);
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
	return tr;
}

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 *  Where "type" is the trace type that includes the trace_entry
 *  as the "ent" item. And "id" is the trace identifier that is
 *  used in the trace_type enum.
 *
 *  If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);		\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);		\
		__ftrace_bad_type();					\
	} while (0)
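
/*
 * Illustrative sketch (not part of the original header): an output
 * callback would typically use trace_assign_type() like this, assuming
 * iter->ent holds a TRACE_FN record:
 *
 *	struct ftrace_entry *field;
 *
 *	trace_assign_type(field, iter->ent);
 *	trace_seq_printf(&iter->seq, "%lx <-- %lx\n",
 *			 field->ip, field->parent_ip);
 *
 * If iter->ent were not a TRACE_FN entry, the WARN_ON() in IF_ASSIGN()
 * would fire, catching the mismatch at run time.
 */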

/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
	struct tracer		*trace;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b
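
/*
 * Illustrative sketch (not part of the original header): a tracer with a
 * single private option called "my-opt" (a made-up name) could describe
 * it as
 *
 *	static struct tracer_opt my_opts[] = {
 *		{ TRACER_OPT(my-opt, 0x1) },
 *		{ }
 *	};
 *
 *	static struct tracer_flags my_flags = {
 *		.val  = 0,
 *		.opts = my_opts,
 *	};
 *
 * with the array terminated by an empty entry, and struct tracer::flags
 * pointing at my_flags.
 */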


struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};

/**
 * struct tracer - a specific tracer and its callbacks to interact with tracefs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_on)
 * @stop: called when tracing is paused (echo 0 > tracing_on)
 * @update_thresh: called when tracing_thresh is updated
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	int			(*update_thresh)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*pipe_close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(struct trace_array *tr,
					    u32 old_flags, u32 bit, int set);
	/* Return 0 if OK with change, else return non-zero */
	int			(*flag_changed)(struct trace_array *tr,
						u32 mask, int set);
	struct tracer		*next;
	struct tracer_flags	*flags;
	int			enabled;
	int			ref;
	bool			print_max;
	bool			allow_instances;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool			use_max_tr;
#endif
};
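
/*
 * Illustrative sketch (not part of the original header): a minimal tracer
 * only has to provide a name plus the callbacks it cares about, e.g.
 *
 *	static struct tracer example_trace __tracer_data = {
 *		.name		= "example",
 *		.init		= example_init,
 *		.reset		= example_reset,
 *		.allow_instances = true,
 *	};
 *
 * where "example" and the callback names are made up here. Such a tracer
 * is made available through register_tracer(&example_trace) at boot, after
 * which it shows up in available_tracers.
 */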

/* Only current can touch trace_recursion */

/*
 * For function tracing recursion:
 *  The order of these bits is important.
 *
 *  When function tracing occurs, the following steps are made:
 *   If arch does not support a ftrace feature:
 *    call internal function (uses INTERNAL bits) which calls...
 *   If callback is registered to the "global" list, the list
 *    function is called and recursion checks the GLOBAL bits.
 *    then this function calls...
 *   The function callback, which can use the FTRACE bits to
 *    check for recursion.
 *
 * Now if the arch does not support a feature, and it calls
 * the global list function which calls the ftrace callback
 * all three of these steps will do a recursion protection.
 * There's no reason to do one if the previous caller already
 * did. The recursion that we are protecting against will
 * go through the same steps again.
 *
 * To prevent the multiple recursion checks, if a recursion
 * bit is set that is higher than the MAX bit of the current
 * check, then we know that the check was made by the previous
 * caller, and we can skip the current check.
 */
enum {
	TRACE_BUFFER_BIT,
	TRACE_BUFFER_NMI_BIT,
	TRACE_BUFFER_IRQ_BIT,
	TRACE_BUFFER_SIRQ_BIT,

	/* Start of function recursion bits */
	TRACE_FTRACE_BIT,
	TRACE_FTRACE_NMI_BIT,
	TRACE_FTRACE_IRQ_BIT,
	TRACE_FTRACE_SIRQ_BIT,

	/* INTERNAL_BITs must be greater than FTRACE_BITs */
	TRACE_INTERNAL_BIT,
	TRACE_INTERNAL_NMI_BIT,
	TRACE_INTERNAL_IRQ_BIT,
	TRACE_INTERNAL_SIRQ_BIT,

	TRACE_BRANCH_BIT,
/*
 * Abuse of the trace_recursion:
 * we need a way to maintain state if we are tracing the function graph
 * in irq context, because we want to trace a particular function that
 * was called in irq context while irq tracing is off. Since this can
 * only be modified by current, we can reuse trace_recursion.
 */
	TRACE_IRQ_BIT,
};

#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))

#define TRACE_CONTEXT_BITS	4

#define TRACE_FTRACE_START	TRACE_FTRACE_BIT
#define TRACE_FTRACE_MAX	((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_LIST_START	TRACE_INTERNAL_BIT
#define TRACE_LIST_MAX		((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_CONTEXT_MASK	TRACE_LIST_MAX

static __always_inline int trace_get_context_bit(void)
{
	int bit;

	if (in_interrupt()) {
		if (in_nmi())
			bit = 0;
		else if (in_irq())
			bit = 1;
		else
			bit = 2;
	} else
		bit = 3;

	return bit;
}

static __always_inline int trace_test_and_set_recursion(int start, int max)
{
	unsigned int val = current->trace_recursion;
	int bit;

	/* A previous recursion check was made */
	if ((val & TRACE_CONTEXT_MASK) > max)
		return 0;

	bit = trace_get_context_bit() + start;
	if (unlikely(val & (1 << bit)))
		return -1;

	val |= 1 << bit;
	current->trace_recursion = val;
	barrier();

	return bit;
}

static __always_inline void trace_clear_recursion(int bit)
{
	unsigned int val = current->trace_recursion;

	if (!bit)
		return;

	bit = 1 << bit;
	val &= ~bit;

	barrier();
	current->trace_recursion = val;
}
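
/*
 * Illustrative sketch (not part of the original header): a function
 * tracing callback would typically guard itself against recursion
 * like this:
 *
 *	int bit;
 *
 *	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
 *	if (bit < 0)
 *		return;
 *
 *	... do the actual tracing work ...
 *
 *	trace_clear_recursion(bit);
 */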

static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
	if (iter->buffer_iter && iter->buffer_iter[cpu])
		return iter->buffer_iter[cpu];
	return NULL;
}

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void tracing_reset(struct trace_buffer *buf, int cpu);
void tracing_reset_online_cpus(struct trace_buffer *buf);
void tracing_reset_current(int cpu);
void tracing_reset_all_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
bool tracing_is_disabled(void);
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops);

struct dentry *tracing_init_dentry(void);

struct ring_buffer_event;

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags,
			  int pc);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
						struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

void __buffer_unlock_commit(struct ring_buffer *buffer,
			    struct ring_buffer_event *event);

int trace_empty(struct trace_iterator *iter);

void *trace_find_next_entry_inc(struct trace_iterator *iter);

void trace_init_global_iter(struct trace_iterator *iter);

void tracing_iter_reset(struct trace_iterator *iter, int cpu);

void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void trace_graph_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void trace_latency_header(struct seq_file *m);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
int trace_empty(struct trace_iterator *iter);

void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
void set_graph_array(struct trace_array *tr);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
int register_tracer(struct tracer *type);
int is_tracing_stopped(void);

loff_t tracing_lseek(struct file *file, loff_t offset, int whence);

extern cpumask_var_t __read_mostly tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)	\
	for_each_cpu(cpu, tracing_buffer_mask)

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_thresh;

/* PID filtering */

extern int pid_max;

bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
			     pid_t search_pid);
bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
			    struct task_struct *task);
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task);
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
int trace_pid_show(struct seq_file *m, void *v);
void trace_free_pid_list(struct trace_pid_list *pid_list);
int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt);

#ifdef CONFIG_TRACER_MAX_TRACE
void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);
#endif /* CONFIG_TRACER_MAX_TRACE */

#ifdef CONFIG_STACKTRACE
void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
			    int pc);

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc);
#else
static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
					  unsigned long flags, int pc)
{
}

static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
				 int skip, int pc)
{
}
#endif /* CONFIG_STACKTRACE */

extern cycle_t ftrace_now(int cpu);

extern void trace_find_cmdline(int pid, char comm[]);
extern void trace_event_follow_fork(struct trace_array *tr, bool enable);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#endif
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void);

extern bool ring_buffer_expanded;
extern bool tracing_selftest_disabled;

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
/*
 * Tracer data references selftest functions that only occur
 * on boot up. These can be __init functions. Thus, when selftests
 * are enabled, then the tracers need to reference __init functions.
 */
#define __tracer_data		__refdata
#else
/* Tracers are seldom changed. Optimize when selftests are disabled. */
#define __tracer_data		__read_mostly
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(cycle_t nsec);
extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_array_vprintk(struct trace_array *tr,
		    unsigned long ip, const char *fmt, va_list args);
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...);
int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);

extern char trace_find_mark(unsigned long long duration);

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN       0x1
#define TRACE_GRAPH_PRINT_CPU           0x2
#define TRACE_GRAPH_PRINT_OVERHEAD      0x4
#define TRACE_GRAPH_PRINT_PROC          0x8
#define TRACE_GRAPH_PRINT_DURATION      0x10
#define TRACE_GRAPH_PRINT_ABS_TIME      0x20
#define TRACE_GRAPH_PRINT_IRQS          0x40
#define TRACE_GRAPH_PRINT_TAIL          0x80
#define TRACE_GRAPH_SLEEP_TIME		0x100
#define TRACE_GRAPH_GRAPH_TIME		0x200
#define TRACE_GRAPH_PRINT_FILL_SHIFT	28
#define TRACE_GRAPH_PRINT_FILL_MASK	(0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)

extern void ftrace_graph_sleep_time_control(bool enable);
extern void ftrace_graph_graph_time_control(bool enable);

extern enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags);
extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
extern void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
extern void graph_trace_open(struct trace_iterator *iter);
extern void graph_trace_close(struct trace_iterator *iter);
extern int __trace_graph_entry(struct trace_array *tr,
			       struct ftrace_graph_ent *trace,
			       unsigned long flags, int pc);
extern void __trace_graph_return(struct trace_array *tr,
				 struct ftrace_graph_ret *trace,
				 unsigned long flags, int pc);


#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
#define FTRACE_GRAPH_MAX_FUNCS		32
extern int ftrace_graph_count;
extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
extern int ftrace_graph_notrace_count;
extern unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS];

static inline int ftrace_graph_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_count)
		return 1;

	for (i = 0; i < ftrace_graph_count; i++) {
		if (addr == ftrace_graph_funcs[i]) {
			/*
			 * If no irqs are to be traced, but a set_graph_function
			 * is set, and called by an interrupt handler, we still
			 * want to trace it.
			 */
			if (in_irq())
				trace_recursion_set(TRACE_IRQ_BIT);
			else
				trace_recursion_clear(TRACE_IRQ_BIT);
			return 1;
		}
	}

	return 0;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_notrace_count)
		return 0;

	for (i = 0; i < ftrace_graph_notrace_count; i++) {
		if (addr == ftrace_graph_notrace_funcs[i])
			return 1;
	}

	return 0;
}
#else
static inline int ftrace_graph_addr(unsigned long addr)
{
	return 1;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

extern struct list_head ftrace_pids;

#ifdef CONFIG_FUNCTION_TRACER
extern bool ftrace_filter_param __initdata;
static inline int ftrace_trace_task(struct task_struct *task)
{
	if (list_empty(&ftrace_pids))
		return 1;

	return test_tsk_trace_trace(task);
}
extern int ftrace_is_dead(void);
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent);
void ftrace_destroy_function_files(struct trace_array *tr);
void ftrace_init_global_array_ops(struct trace_array *tr);
void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
void ftrace_reset_array_ops(struct trace_array *tr);
int using_ftrace_ops_list_func(void);
#else
static inline int ftrace_trace_task(struct task_struct *task)
{
	return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
static inline int
ftrace_create_function_files(struct trace_array *tr,
			     struct dentry *parent)
{
	return 0;
}
static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
static inline __init void
ftrace_init_global_array_ops(struct trace_array *tr) { }
static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
/* ftrace_func_t type is not defined, use macro instead of static inline */
#define ftrace_init_array_ops(tr, func) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER */

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
void ftrace_create_filter_files(struct ftrace_ops *ops,
				struct dentry *parent);
void ftrace_destroy_filter_files(struct ftrace_ops *ops);
#else
/*
 * The ops parameter passed in is usually undefined.
 * This must be a macro.
 */
#define ftrace_create_filter_files(ops, parent) do { } while (0)
#define ftrace_destroy_filter_files(ops) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */

bool ftrace_event_is_function(struct trace_event_call *call);

/*
 * struct trace_parser - serves for reading the user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
struct trace_parser {
	bool		cont;
	char		*buffer;
	unsigned	idx;
	unsigned	size;
};

static inline bool trace_parser_loaded(struct trace_parser *parser)
{
	return (parser->idx != 0);
}

static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return parser->cont;
}

static inline void trace_parser_clear(struct trace_parser *parser)
{
	parser->cont = false;
	parser->idx = 0;
}

extern int trace_parser_get_init(struct trace_parser *parser, int size);
extern void trace_parser_put(struct trace_parser *parser);
extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos);
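
/*
 * Illustrative sketch (not part of the original header): a write() handler
 * that accepts a list of names could drive the parser roughly like this:
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser) &&
 *	    !trace_parser_cont(&parser)) {
 *		... parser.buffer now holds one complete word ...
 *	}
 *
 *	trace_parser_put(&parser);
 */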

/*
 * Only create function graph options if function graph is configured.
 */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# define FGRAPH_FLAGS						\
		C(DISPLAY_GRAPH,	"display-graph"),
#else
# define FGRAPH_FLAGS
#endif

#ifdef CONFIG_BRANCH_TRACER
# define BRANCH_FLAGS					\
		C(BRANCH,		"branch"),
#else
# define BRANCH_FLAGS
#endif

#ifdef CONFIG_FUNCTION_TRACER
# define FUNCTION_FLAGS						\
		C(FUNCTION,		"function-trace"),
# define FUNCTION_DEFAULT_FLAGS		TRACE_ITER_FUNCTION
#else
# define FUNCTION_FLAGS
# define FUNCTION_DEFAULT_FLAGS		0UL
#endif

#ifdef CONFIG_STACKTRACE
# define STACK_FLAGS				\
		C(STACKTRACE,		"stacktrace"),
#else
# define STACK_FLAGS
#endif

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that controls the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c (this macro guarantees it).
 */
#define TRACE_FLAGS						\
		C(PRINT_PARENT,		"print-parent"),	\
		C(SYM_OFFSET,		"sym-offset"),		\
		C(SYM_ADDR,		"sym-addr"),		\
		C(VERBOSE,		"verbose"),		\
		C(RAW,			"raw"),			\
		C(HEX,			"hex"),			\
		C(BIN,			"bin"),			\
		C(BLOCK,		"block"),		\
		C(PRINTK,		"trace_printk"),	\
		C(ANNOTATE,		"annotate"),		\
		C(USERSTACKTRACE,	"userstacktrace"),	\
		C(SYM_USEROBJ,		"sym-userobj"),		\
		C(PRINTK_MSGONLY,	"printk-msg-only"),	\
		C(CONTEXT_INFO,		"context-info"),   /* Print pid/cpu/time */ \
		C(LATENCY_FMT,		"latency-format"),	\
		C(RECORD_CMD,		"record-cmd"),		\
		C(OVERWRITE,		"overwrite"),		\
		C(STOP_ON_FREE,		"disable_on_free"),	\
		C(IRQ_INFO,		"irq-info"),		\
		C(MARKERS,		"markers"),		\
		C(EVENT_FORK,		"event-fork"),		\
		FUNCTION_FLAGS					\
		FGRAPH_FLAGS					\
		STACK_FLAGS					\
		BRANCH_FLAGS

/*
 * By defining C, we can make TRACE_FLAGS a list of bit names
 * that will define the bits for the flag masks.
 */
#undef C
#define C(a, b) TRACE_ITER_##a##_BIT

enum trace_iterator_bits {
	TRACE_FLAGS
	/* Make sure we don't go more than we have bits for */
	TRACE_ITER_LAST_BIT
};

/*
 * By redefining C, we can make TRACE_FLAGS a list of masks that
 * use the bits as defined above.
 */
#undef C
#define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)

enum trace_iterator_flags { TRACE_FLAGS };
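
/*
 * Illustrative expansion (not part of the original header): with the two
 * definitions of C() above, an entry such as C(PRINT_PARENT, "print-parent")
 * first becomes the enumerator TRACE_ITER_PRINT_PARENT_BIT and is then
 * redefined as the mask
 *
 *	TRACE_ITER_PRINT_PARENT = (1 << TRACE_ITER_PRINT_PARENT_BIT)
 *
 * while the string "print-parent" is what trace.c exposes in the
 * trace_options file.
 */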

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

extern struct tracer nop_trace;

#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (tr->trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

/* set ring buffers to default size if not already done so */
int tracing_update_buffers(void);

struct ftrace_event_field {
	struct list_head	link;
	const char		*name;
	const char		*type;
	int			filter_type;
	int			offset;
	int			size;
	int			is_signed;
};

struct event_filter {
	int			n_preds;	/* Number assigned */
	int			a_preds;	/* allocated */
	struct filter_pred	*preds;
	struct filter_pred	*root;
	char			*filter_string;
};

struct event_subsystem {
	struct list_head	list;
	const char		*name;
	struct event_filter	*filter;
	int			ref_count;
};

struct trace_subsystem_dir {
	struct list_head		list;
	struct event_subsystem		*subsystem;
	struct trace_array		*tr;
	struct dentry			*entry;
	int				ref_count;
	int				nr_events;
};

extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event);

void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs);

static inline void trace_buffer_unlock_commit(struct trace_array *tr,
					      struct ring_buffer *buffer,
					      struct ring_buffer_event *event,
					      unsigned long flags, int pc)
{
	trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL);
}

DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
DECLARE_PER_CPU(int, trace_buffered_event_cnt);
void trace_buffered_event_disable(void);
void trace_buffered_event_enable(void);

static inline void
__trace_event_discard_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event)
{
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Simply release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
		return;
	}
	ring_buffer_discard_commit(buffer, event);
}

/*
 * Helper function for event_trigger_unlock_commit{_regs}().
 * If there are event triggers attached to this event that require
 * filtering against its fields, then they will be called as the
 * entry already holds the field information of the current event.
 *
 * It also checks if the event should be discarded or not.
 * It is to be discarded if the event is soft disabled and the
 * event was only recorded to process triggers, or if the event
 * filter is active and this event did not match the filters.
 *
 * Returns true if the event is discarded, false otherwise.
 */
static inline bool
__event_trigger_test_discard(struct trace_event_file *file,
			     struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     void *entry,
			     enum event_trigger_type *tt)
{
	unsigned long eflags = file->flags;

	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
		*tt = event_triggers_call(file, entry);

	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
	     !filter_match_preds(file->filter, entry))) {
		__trace_event_discard_commit(buffer, event);
		return true;
	}

	return false;
}

/**
 * event_trigger_unlock_commit - handle triggers and finish event commit
 * @file: The file pointer associated with the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @irq_flags: The state of the interrupts at the start of the event
 * @pc: The state of the preempt count at the start of the event.
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
 * if the event is soft disabled and should be discarded.
 */
static inline void
event_trigger_unlock_commit(struct trace_event_file *file,
			    struct ring_buffer *buffer,
			    struct ring_buffer_event *event,
			    void *entry, unsigned long irq_flags, int pc)
{
	enum event_trigger_type tt = ETT_NONE;

	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
		trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);

	if (tt)
		event_triggers_post_call(file, tt, entry);
}
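
/*
 * Illustrative sketch (not part of the original header): an event probe
 * that has reserved @event on @buffer and filled in @entry would finish
 * with something like
 *
 *	event_trigger_unlock_commit(file, buffer, event, entry,
 *				    irq_flags, pc);
 *
 * which either commits the record or discards it, and then runs any
 * post triggers that were deferred by __event_trigger_test_discard().
 */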

/**
 * event_trigger_unlock_commit_regs - handle triggers and finish event commit
 * @file: The file pointer associated with the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @irq_flags: The state of the interrupts at the start of the event
 * @pc: The state of the preempt count at the start of the event.
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
 * if the event is soft disabled and should be discarded.
 *
 * Same as event_trigger_unlock_commit() but calls
 * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
 */
static inline void
event_trigger_unlock_commit_regs(struct trace_event_file *file,
				 struct ring_buffer *buffer,
				 struct ring_buffer_event *event,
				 void *entry, unsigned long irq_flags, int pc,
				 struct pt_regs *regs)
{
	enum event_trigger_type tt = ETT_NONE;

	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
		trace_buffer_unlock_commit_regs(file->tr, buffer, event,
						irq_flags, pc, regs);

	if (tt)
		event_triggers_post_call(file, tt, entry);
}

#define FILTER_PRED_INVALID	((unsigned short)-1)
#define FILTER_PRED_IS_RIGHT	(1 << 15)
#define FILTER_PRED_FOLD	(1 << 15)

/*
 * The max preds is the size of unsigned short with
 * two flags at the MSBs. One bit is used for both the IS_RIGHT
 * and FOLD flags. The other is reserved.
 *
 * 2^14 preds is way more than enough.
 */
#define MAX_FILTER_PRED		16384

struct filter_pred;
struct regex;

typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);

typedef int (*regex_match_func)(char *str, struct regex *r, int len);

enum regex_type {
	MATCH_FULL = 0,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

struct regex {
	char			pattern[MAX_FILTER_STR_VAL];
	int			len;
	int			field_len;
	regex_match_func	match;
};

struct filter_pred {
	filter_pred_fn_t 	fn;
	u64 			val;
	struct regex		regex;
	unsigned short		*ops;
	struct ftrace_event_field *field;
	int 			offset;
	int 			not;
	int 			op;
	unsigned short		index;
	unsigned short		parent;
	unsigned short		left;
	unsigned short		right;
};

static inline bool is_string_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_DYN_STRING ||
	       field->filter_type == FILTER_STATIC_STRING ||
	       field->filter_type == FILTER_PTR_STRING;
}

static inline bool is_function_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_TRACE_FN;
}

extern enum regex_type
filter_parse_regex(char *buff, int len, char **search, int *not);
extern void print_event_filter(struct trace_event_file *file,
			       struct trace_seq *s);
extern int apply_event_filter(struct trace_event_file *file,
			      char *filter_string);
extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
					char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system,
					 struct trace_seq *s);
extern int filter_assign_type(const char *type);
extern int create_event_filter(struct trace_event_call *call,
			       char *filter_str, bool set_str,
			       struct event_filter **filterp);
extern void free_event_filter(struct event_filter *filter);

struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name);

extern void trace_event_enable_cmd_record(bool enable);
extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
extern int event_trace_del_tracer(struct trace_array *tr);

extern struct trace_event_file *find_event_file(struct trace_array *tr,
						const char *system,
						const char *event);

static inline void *event_file_data(struct file *filp)
{
	return ACCESS_ONCE(file_inode(filp)->i_private);
}

extern struct mutex event_mutex;
extern struct list_head ftrace_events;

extern const struct file_operations event_trigger_fops;
extern const struct file_operations event_hist_fops;

#ifdef CONFIG_HIST_TRIGGERS
extern int register_trigger_hist_cmd(void);
extern int register_trigger_hist_enable_disable_cmds(void);
#else
static inline int register_trigger_hist_cmd(void) { return 0; }
static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; }
#endif

extern int register_trigger_cmds(void);
extern void clear_event_triggers(struct trace_array *tr);

struct event_trigger_data {
	unsigned long			count;
	int				ref;
	struct event_trigger_ops	*ops;
	struct event_command		*cmd_ops;
	struct event_filter __rcu	*filter;
	char				*filter_str;
	void				*private_data;
	bool				paused;
	bool				paused_tmp;
	struct list_head		list;
	char				*name;
	struct list_head		named_list;
	struct event_trigger_data	*named_data;
};

/* Avoid typos */
#define ENABLE_EVENT_STR	"enable_event"
#define DISABLE_EVENT_STR	"disable_event"
#define ENABLE_HIST_STR		"enable_hist"
#define DISABLE_HIST_STR	"disable_hist"

struct enable_trigger_data {
	struct trace_event_file		*file;
	bool				enable;
	bool				hist;
};

extern int event_enable_trigger_print(struct seq_file *m,
				      struct event_trigger_ops *ops,
				      struct event_trigger_data *data);
extern void event_enable_trigger_free(struct event_trigger_ops *ops,
				      struct event_trigger_data *data);
extern int event_enable_trigger_func(struct event_command *cmd_ops,
				     struct trace_event_file *file,
				     char *glob, char *cmd, char *param);
extern int event_enable_register_trigger(char *glob,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data,
					 struct trace_event_file *file);
extern void event_enable_unregister_trigger(char *glob,
					    struct event_trigger_ops *ops,
					    struct event_trigger_data *test,
					    struct trace_event_file *file);
extern void trigger_data_free(struct event_trigger_data *data);
extern int event_trigger_init(struct event_trigger_ops *ops,
			      struct event_trigger_data *data);
extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
					      int trigger_enable);
extern void update_cond_flag(struct trace_event_file *file);
extern void unregister_trigger(char *glob, struct event_trigger_ops *ops,
			       struct event_trigger_data *test,
			       struct trace_event_file *file);
extern int set_trigger_filter(char *filter_str,
			      struct event_trigger_data *trigger_data,
			      struct trace_event_file *file);
extern struct event_trigger_data *find_named_trigger(const char *name);
extern bool is_named_trigger(struct event_trigger_data *test);
extern int save_named_trigger(const char *name,
			      struct event_trigger_data *data);
extern void del_named_trigger(struct event_trigger_data *data);
extern void pause_named_trigger(struct event_trigger_data *data);
extern void unpause_named_trigger(struct event_trigger_data *data);
extern void set_named_trigger_data(struct event_trigger_data *data,
				   struct event_trigger_data *named_data);
extern int register_event_command(struct event_command *cmd);
extern int unregister_event_command(struct event_command *cmd);
extern int register_trigger_hist_enable_disable_cmds(void);

/**
 * struct event_trigger_ops - callbacks for trace event triggers
 *
 * The methods in this structure provide per-event trigger hooks for
 * various trigger operations.
 *
 * All the methods below, except for @init() and @free(), must be
 * implemented.
 *
 * @func: The trigger 'probe' function called when the triggering
 *	event occurs.  The data passed into this callback is the data
 *	that was supplied to the event_command @reg() function that
 *	registered the trigger (see struct event_command) along with
 *	the trace record, rec.
 *
 * @init: An optional initialization function called for the trigger
 *	when the trigger is registered (via the event_command reg()
 *	function).  This can be used to perform per-trigger
 *	initialization such as incrementing a per-trigger reference
 *	count, for instance.  This is usually implemented by the
 *	generic utility function @event_trigger_init() (see
 *	trace_event_triggers.c).
 *
 * @free: An optional de-initialization function called for the
 *	trigger when the trigger is unregistered (via the
 *	event_command @reg() function).  This can be used to perform
 *	per-trigger de-initialization such as decrementing a
 *	per-trigger reference count and freeing corresponding trigger
 *	data, for instance.  This is usually implemented by the
 *	generic utility function @event_trigger_free() (see
 *	trace_event_triggers.c).
 *
 * @print: The callback function invoked to have the trigger print
 *	itself.  This is usually implemented by a wrapper function
 *	that calls the generic utility function @event_trigger_print()
 *	(see trace_event_triggers.c).
 */
struct event_trigger_ops {
	void			(*func)(struct event_trigger_data *data,
					void *rec);
	int			(*init)(struct event_trigger_ops *ops,
					struct event_trigger_data *data);
	void			(*free)(struct event_trigger_ops *ops,
					struct event_trigger_data *data);
	int			(*print)(struct seq_file *m,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data);
};

/**
 * struct event_command - callbacks and data members for event commands
 *
 * Event commands are invoked by users by writing the command name
 * into the 'trigger' file associated with a trace event.  The
 * parameters associated with a specific invocation of an event
 * command are used to create an event trigger instance, which is
 * added to the list of trigger instances associated with that trace
 * event.  When the event is hit, the set of triggers associated with
 * that event is invoked.
 *
 * The data members in this structure provide per-event command data
 * for various event commands.
 *
 * All the data members below, except for @post_trigger, must be set
 * for each event command.
 *
 * @name: The unique name that identifies the event command.  This is
 *	the name used when setting triggers via trigger files.
 *
 * @trigger_type: A unique id that identifies the event command
 *	'type'.  This value has two purposes, the first to ensure that
 *	only one trigger of the same type can be set at a given time
 *	for a particular event e.g. it doesn't make sense to have both
 *	a traceon and traceoff trigger attached to a single event at
 *	the same time, so traceon and traceoff have the same type
 *	though they have different names.  The @trigger_type value is
 *	also used as a bit value for deferring the actual trigger
 *	action until after the current event is finished.  Some
 *	commands need to do this if they themselves log to the trace
 *	buffer (see the @post_trigger() member below).  @trigger_type
 *	values are defined by adding new values to the trigger_type
 *	enum in include/linux/trace_events.h.
 *
 * @flags: See the enum event_command_flags below.
 *
 * All the methods below, except for @set_filter() and @unreg_all(),
 * must be implemented.
 *
 * @func: The callback function responsible for parsing and
 *	registering the trigger written to the 'trigger' file by the
 *	user.  It allocates the trigger instance and registers it with
 *	the appropriate trace event.  It makes use of the other
 *	event_command callback functions to orchestrate this, and is
 *	usually implemented by the generic utility function
 *	@event_trigger_callback() (see trace_event_triggers.c).
 *
 * @reg: Adds the trigger to the list of triggers associated with the
 *	event, and enables the event trigger itself, after
 *	initializing it (via the event_trigger_ops @init() function).
 *	This is also where commands can use the @trigger_type value to
 *	make the decision as to whether or not multiple instances of
 *	the trigger should be allowed.  This is usually implemented by
 *	the generic utility function @register_trigger() (see
 *	trace_event_triggers.c).
 *
 * @unreg: Removes the trigger from the list of triggers associated
 *	with the event, and disables the event trigger itself, after
 *	initializing it (via the event_trigger_ops @free() function).
 *	This is usually implemented by the generic utility function
 *	@unregister_trigger() (see trace_event_triggers.c).
 *
 * @unreg_all: An optional function called to remove all the triggers
 *	from the list of triggers associated with the event.  Called
 *	when a trigger file is opened in truncate mode.
 *
 * @set_filter: An optional function called to parse and set a filter
 *	for the trigger.  If no @set_filter() method is set for the
 *	event command, filters set by the user for the command will be
 *	ignored.  This is usually implemented by the generic utility
 *	function @set_trigger_filter() (see trace_event_triggers.c).
 *
 * @get_trigger_ops: The callback function invoked to retrieve the
 *	event_trigger_ops implementation associated with the command.
 */
struct event_command {
	struct list_head	list;
	char			*name;
	enum event_trigger_type	trigger_type;
	int			flags;
	int			(*func)(struct event_command *cmd_ops,
					struct trace_event_file *file,
					char *glob, char *cmd, char *params);
	int			(*reg)(char *glob,
				       struct event_trigger_ops *ops,
				       struct event_trigger_data *data,
				       struct trace_event_file *file);
	void			(*unreg)(char *glob,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data,
					 struct trace_event_file *file);
	void			(*unreg_all)(struct trace_event_file *file);
	int			(*set_filter)(char *filter_str,
					      struct event_trigger_data *data,
					      struct trace_event_file *file);
	struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
};
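
/*
 * Illustrative sketch (not part of the original header): the built-in
 * trigger commands are defined along these lines (the callback helper
 * names here are assumptions; see trace_events_trigger.c for the real
 * definitions):
 *
 *	static struct event_command trigger_traceon_cmd = {
 *		.name			= "traceon",
 *		.trigger_type		= ETT_TRACE_ONOFF,
 *		.func			= event_trigger_callback,
 *		.reg			= register_trigger,
 *		.unreg			= unregister_trigger,
 *		.get_trigger_ops	= onoff_get_trigger_ops,
 *		.set_filter		= set_trigger_filter,
 *	};
 *
 * and are made known to the trigger file parser with
 * register_event_command(&trigger_traceon_cmd).
 */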

/**
 * enum event_command_flags - flags for struct event_command
 *
 * @POST_TRIGGER: A flag that says whether or not this command needs
 *	to have its action delayed until after the current event has
 *	been closed.  Some triggers need to avoid being invoked while
 *	an event is currently in the process of being logged, since
 *	the trigger may itself log data into the trace buffer.  Thus
 *	we make sure the current event is committed before invoking
 *	those triggers.  To do that, the trigger invocation is split
 *	in two - the first part checks the filter using the current
 *	trace record; if a command has the @post_trigger flag set, it
 *	sets a bit for itself in the return value, otherwise it
 *	directly invokes the trigger.  Once all commands have been
 *	either invoked or set their return flag, the current record is
 *	either committed or discarded.  At that point, if any commands
 *	have deferred their triggers, those commands are finally
 *	invoked following the close of the current event.  In other
 *	words, if the event_trigger_ops @func() probe implementation
 *	itself logs to the trace buffer, this flag should be set,
 *	otherwise it can be left unspecified.
 *
 * @NEEDS_REC: A flag that says whether or not this command needs
 *	access to the trace record in order to perform its function,
 *	regardless of whether or not it has a filter associated with
 *	it (filters make a trigger require access to the trace record
 *	but are not always present).
 */
enum event_command_flags {
	EVENT_CMD_FL_POST_TRIGGER	= 1,
	EVENT_CMD_FL_NEEDS_REC		= 2,
};

static inline bool event_command_post_trigger(struct event_command *cmd_ops)
{
	return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
}

static inline bool event_command_needs_rec(struct event_command *cmd_ops)
{
	return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
}

extern int trace_event_enable_disable(struct trace_event_file *file,
				      int enable, int soft_disable);
extern int tracing_alloc_snapshot(void);

extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];

extern const char *__start___tracepoint_str[];
extern const char *__stop___tracepoint_str[];

void trace_printk_control(bool enabled);
void trace_printk_init_buffers(void);
void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);

/*
 * Normal trace_printk() and friends allocate special buffers
 * to do the manipulation, as well as saves the print formats
 * into sections to display. But the trace infrastructure wants
 * to use these without the added overhead at the price of being
 * a bit slower (used mainly for warnings, where we don't care
 * about performance). The internal_trace_puts() is for such
 * a purpose.
 */
#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
	extern struct trace_event_call					\
	__aligned(4) event_##call;
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter)	\
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)
#include "trace_entries.h"

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
int perf_ftrace_event_register(struct trace_event_call *call,
			       enum trace_reg type, void *data);
#else
#define perf_ftrace_event_register NULL
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
void init_ftrace_syscalls(void);
const char *get_syscall_name(int syscall);
#else
static inline void init_ftrace_syscalls(void) { }
static inline const char *get_syscall_name(int syscall)
{
	return NULL;
}
#endif

#ifdef CONFIG_EVENT_TRACING
void trace_event_init(void);
void trace_event_enum_update(struct trace_enum_map **map, int len);
#else
static inline void __init trace_event_init(void) { }
static inline void trace_event_enum_update(struct trace_enum_map **map, int len) { }
#endif

extern struct trace_iterator *tracepoint_print_iter;

#endif /* _LINUX_KERNEL_TRACE_H */