#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <asm/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/ftrace.h>
#include <trace/boot.h>

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_CONT,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_SPECIAL,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BOOT_CALL,
	TRACE_BOOT_RET,
	TRACE_FN_RET,

	__TRACE_LAST_TYPE
};

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned char		type;
	unsigned char		cpu;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
};

/*
 * Function trace entry - function address and parent function address:
 */
struct ftrace_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	unsigned long		parent_ip;
};

/* Function return entry */
struct ftrace_ret_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	unsigned long		parent_ip;
	unsigned long long	calltime;
	unsigned long long	rettime;
};
extern struct tracer boot_tracer;

/*
 * Context switch trace entry - which task (and prio) we switched from/to:
 */
struct ctx_switch_entry {
	struct trace_entry	ent;
	unsigned int		prev_pid;
	unsigned char		prev_prio;
	unsigned char		prev_state;
	unsigned int		next_pid;
	unsigned char		next_prio;
	unsigned char		next_state;
	unsigned int		next_cpu;
};

/*
 * Special (free-form) trace entry:
 */
struct special_entry {
	struct trace_entry	ent;
	unsigned long		arg1;
	unsigned long		arg2;
	unsigned long		arg3;
};

/*
 * Stack-trace entry:
 */

#define FTRACE_STACK_ENTRIES	8

struct stack_entry {
	struct trace_entry	ent;
	unsigned long		caller[FTRACE_STACK_ENTRIES];
};

/*
 * ftrace_printk entry:
 */
struct print_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	char			buf[];
};

#define TRACE_OLD_SIZE		88

struct trace_field_cont {
	unsigned char		type;
	/* Temporary till we get rid of this completely */
	char			buf[TRACE_OLD_SIZE - 1];
};

struct trace_mmiotrace_rw {
	struct trace_entry	ent;
	struct mmiotrace_rw	rw;
};

struct trace_mmiotrace_map {
	struct trace_entry	ent;
	struct mmiotrace_map	map;
};

struct trace_boot_call {
	struct trace_entry	ent;
	struct boot_trace_call boot_call;
};

struct trace_boot_ret {
	struct trace_entry	ent;
	struct boot_trace_ret boot_ret;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF		- interrupts were disabled
 *  IRQS_NOSUPPORT 	- arch does not support irqs_disabled_flags
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ		- inside an interrupt handler
 *  SOFTIRQ		- inside a softirq handler
 *  CONT		- multiple entries hold the trace item
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_CONT			= 0x20,
};
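
/*
 * A minimal sketch (illustration only, not necessarily the exact trace.c
 * implementation): output code typically tests these bits in
 * trace_entry::flags, for example to pick a latency-format annotation
 * character for the irqs-off state. The helper name below is hypothetical.
 */
#if 0
static char example_irqs_off_char(struct trace_entry *entry)
{
	if (entry->flags & TRACE_FLAG_IRQS_OFF)
		return 'd';
	if (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT)
		return 'X';
	return '.';
}
#endif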

#define TRACE_BUF_SIZE		1024

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example, which task started
 * the trace, etc.):
 */
struct trace_array_cpu {
	atomic_t		disabled;

	/* these fields get copied into max-trace: */
	unsigned long		trace_idx;
	unsigned long		overrun;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	cycle_t			preempt_timestamp;
	pid_t			pid;
	uid_t			uid;
	char			comm[TASK_COMM_LEN];
};

struct trace_iterator;

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct ring_buffer	*buffer;
	unsigned long		entries;
	int			cpu;
	cycle_t			time_start;
	struct task_struct	*waiter;
	struct trace_array_cpu	*data[NR_CPUS];
};

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type() macro verifies that the entry type is
 * the same as the type being assigned. To add new types, simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 *  Where "type" is the trace type that includes the trace_entry
 *  as the "ent" item. And "id" is the trace identifier that is
 *  used in the trace_type enum.
 *
 *  If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct special_entry, 0);		\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
		IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
		IF_ASSIGN(var, ent, struct ftrace_ret_entry, TRACE_FN_RET);\
		__ftrace_bad_type();					\
	} while (0)
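
/*
 * A minimal usage sketch (illustration only): a hypothetical output helper
 * takes a generic trace_entry and uses trace_assign_type() to obtain a
 * typed pointer before touching type-specific fields. The "example_"
 * names are assumptions, not part of this header.
 */
#if 0
static void example_print_fn_entry(struct trace_seq *s,
				   struct trace_entry *entry)
{
	struct ftrace_entry *field;

	/* Casts entry to the TRACE_FN type; WARNs if the type mismatches. */
	trace_assign_type(field, entry);
	trace_seq_printf(s, "%lx <- %lx\n", field->ip, field->parent_ip);
}
#endif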

/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2	/* Relay to other output functions */
};

/*
 * A specific tracer, represented by methods that operate on a trace array:
 */
struct tracer {
	const char		*name;
	void			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	struct tracer		*next;
	int			print_max;
};
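
/*
 * A minimal sketch of what a tracer definition looks like (illustration
 * only; the "example" tracer below is hypothetical). A real tracer fills
 * in the callbacks it needs and registers itself with register_tracer()
 * from an __init function.
 */
#if 0
static void example_tracer_init(struct trace_array *tr)
{
	int cpu;

	/* start from empty per-cpu buffers */
	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);
}

static void example_tracer_reset(struct trace_array *tr)
{
}

static struct tracer example_tracer __read_mostly = {
	.name	= "example",
	.init	= example_tracer_init,
	.reset	= example_tracer_reset,
};
#endif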

struct trace_seq {
	unsigned char		buffer[PAGE_SIZE];
	unsigned int		len;
	unsigned int		readpos;
};

/*
 * Trace iterator - used by printout routines that present trace
 * results to users and that might sleep, etc.:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct tracer		*trace;
	void			*private;
	struct ring_buffer_iter	*buffer_iter[NR_CPUS];

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;
	struct trace_entry	*ent;
	int			cpu;
	u64			ts;

	unsigned long		iter_flags;
	loff_t			pos;
	long			idx;

	cpumask_t		started;
};

int tracing_is_enabled(void);
void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *tracing_init_dentry(void);
void init_tracer_sysprof_debugfs(struct dentry *d_tracer);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
						struct trace_array_cpu *data);
void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);

void ftrace(struct trace_array *tr,
			    struct trace_array_cpu *data,
			    unsigned long ip,
			    unsigned long parent_ip,
			    unsigned long flags, int pc);
void tracing_sched_switch_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags, int pc);
void tracing_record_cmdline(struct task_struct *tsk);

void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags, int pc);
void trace_special(struct trace_array *tr,
		   struct trace_array_cpu *data,
		   unsigned long arg1,
		   unsigned long arg2,
		   unsigned long arg3, int pc);
void trace_function(struct trace_array *tr,
		    struct trace_array_cpu *data,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void
trace_function_return(struct ftrace_retfunc *trace);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_max_latency;
extern unsigned long tracing_thresh;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);

extern cycle_t ftrace_now(int cpu);

#ifdef CONFIG_FUNCTION_TRACER
void tracing_start_function_trace(void);
void tracing_stop_function_trace(void);
#else
# define tracing_start_function_trace()		do { } while (0)
# define tracing_stop_function_trace()		do { } while (0)
#endif

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
typedef void
(*tracer_switch_func_t)(void *private,
			void *__rq,
			struct task_struct *prev,
			struct task_struct *next);

struct tracer_switch_ops {
	tracer_switch_func_t		func;
	void				*private;
	struct tracer_switch_ops	*next;
};

#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#endif

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_sysprof(struct tracer *trace,
					       struct trace_array *tr);
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
extern void trace_seq_print_cont(struct trace_seq *s,
				 struct trace_iterator *iter);

extern int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip,
		unsigned long sym_flags);
extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
				 size_t cnt);
extern long ns2usecs(cycle_t nsec);
extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args);

extern unsigned long trace_flags;

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_RET_TRACER
extern enum print_line_t print_return_function(struct trace_iterator *iter);
#else
static inline enum print_line_t
print_return_function(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT		= 0x01,
	TRACE_ITER_SYM_OFFSET		= 0x02,
	TRACE_ITER_SYM_ADDR		= 0x04,
	TRACE_ITER_VERBOSE		= 0x08,
	TRACE_ITER_RAW			= 0x10,
	TRACE_ITER_HEX			= 0x20,
	TRACE_ITER_BIN			= 0x40,
	TRACE_ITER_BLOCK		= 0x80,
	TRACE_ITER_STACKTRACE		= 0x100,
	TRACE_ITER_SCHED_TREE		= 0x200,
	TRACE_ITER_PRINTK		= 0x400,
	TRACE_ITER_PREEMPTONLY		= 0x800,
};

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

extern struct tracer nop_trace;

/**
 * ftrace_preempt_disable - disable preemption scheduler safe
 *
 * When tracing can happen inside the scheduler, there exist
 * cases where the tracing might happen before the need_resched
 * flag is checked. If this happens and the tracer calls
 * preempt_enable (after a disable), a schedule might take place
 * causing an infinite recursion.
 *
 * To prevent this, we read the need_resched flag before
 * disabling preemption. When we want to enable preemption we
 * check the flag; if it is set, then we call preempt_enable_no_resched.
 * Otherwise, we call preempt_enable.
 *
 * The rationale for doing the above is that if need_resched is set
 * and we have yet to reschedule, we are either in an atomic location
 * (where we do not need to check for scheduling) or we are inside
 * the scheduler and do not want to resched.
 */
static inline int ftrace_preempt_disable(void)
{
	int resched;

	resched = need_resched();
	preempt_disable_notrace();

	return resched;
}

/**
 * ftrace_preempt_enable - enable preemption scheduler safe
 * @resched: the return value from ftrace_preempt_disable
 *
 * This is a scheduler safe way to enable preemption and not miss
 * any preemption checks. The disable call saved the state of preemption.
 * If resched is set, then we were either inside an atomic or
 * are inside the scheduler (we would have already scheduled
 * otherwise). In this case, we do not want to call normal
 * preempt_enable, but preempt_enable_no_resched instead.
 */
static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}
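
/*
 * A minimal usage sketch (illustration only): a hypothetical tracer
 * callback brackets its work with the pair above so that a pending
 * need_resched is never acted upon from inside the tracer itself.
 */
#if 0
static void example_trace_callback(unsigned long ip, unsigned long parent_ip)
{
	int resched;

	resched = ftrace_preempt_disable();
	/* ... record the (ip, parent_ip) pair into the ring buffer ... */
	ftrace_preempt_enable(resched);
}
#endif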

#endif /* _LINUX_KERNEL_TRACE_H */