#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <asm/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/ftrace.h>
#include <trace/boot.h>

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_CONT,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_SPECIAL,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BOOT,
	TRACE_FN_RET,

	__TRACE_LAST_TYPE
};

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned char		type;
	unsigned char		cpu;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
};
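
/*
 * Decoding the example line above (illustrative note): "bash-15816" is
 * the task comm and pid, "[01]" the cpu field, "235.197585" the
 * timestamp in seconds, and "idle_cpu <- irq_enter" the traced
 * function and its caller (the parent function).
 */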

/*
 * Function trace entry - function address and parent function address:
 */
struct ftrace_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	unsigned long		parent_ip;
};

/* Function return entry */
struct ftrace_ret_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	unsigned long		parent_ip;
	unsigned long long	calltime;
	unsigned long long	rettime;
};
extern struct tracer boot_tracer;

/*
 * Context switch trace entry - which task (and prio) we switched from/to:
 */
struct ctx_switch_entry {
	struct trace_entry	ent;
	unsigned int		prev_pid;
	unsigned char		prev_prio;
	unsigned char		prev_state;
	unsigned int		next_pid;
	unsigned char		next_prio;
	unsigned char		next_state;
	unsigned int		next_cpu;
};

/*
 * Special (free-form) trace entry:
 */
struct special_entry {
	struct trace_entry	ent;
	unsigned long		arg1;
	unsigned long		arg2;
	unsigned long		arg3;
};

/*
 * Stack-trace entry:
 */

#define FTRACE_STACK_ENTRIES	8

struct stack_entry {
	struct trace_entry	ent;
	unsigned long		caller[FTRACE_STACK_ENTRIES];
};

/*
 * ftrace_printk entry:
 */
struct print_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	char			buf[];
};

#define TRACE_OLD_SIZE		88

struct trace_field_cont {
	unsigned char		type;
	/* Temporary till we get rid of this completely */
	char			buf[TRACE_OLD_SIZE - 1];
};

struct trace_mmiotrace_rw {
	struct trace_entry	ent;
	struct mmiotrace_rw	rw;
};

struct trace_mmiotrace_map {
	struct trace_entry	ent;
	struct mmiotrace_map	map;
};

struct trace_boot {
	struct trace_entry	ent;
	struct boot_trace	initcall;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF		- interrupts were disabled
 *  IRQS_NOSUPPORT 	- arch does not support irqs_disabled_flags
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ		- inside an interrupt handler
 *  SOFTIRQ		- inside a softirq handler
 *  CONT		- multiple entries hold the trace item
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_CONT			= 0x20,
};

#define TRACE_BUF_SIZE		1024

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data, such as which task started
 * the trace:
 */
struct trace_array_cpu {
	atomic_t		disabled;

	/* these fields get copied into max-trace: */
	unsigned long		trace_idx;
	unsigned long		overrun;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	cycle_t			preempt_timestamp;
	pid_t			pid;
	uid_t			uid;
	char			comm[TASK_COMM_LEN];
};

struct trace_iterator;

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct ring_buffer	*buffer;
	unsigned long		entries;
	int			cpu;
	cycle_t			time_start;
	struct task_struct	*waiter;
	struct trace_array_cpu	*data[NR_CPUS];
};
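
/*
 * Illustrative access pattern (a sketch, not part of the original
 * header text): a tracer callback typically fetches the per-CPU data
 * and uses the "disabled" counter to guard against recursive
 * recording:
 *
 *	int cpu = raw_smp_processor_id();
 *	struct trace_array_cpu *data = tr->data[cpu];
 *
 *	atomic_inc(&data->disabled);
 *	if (likely(atomic_read(&data->disabled) == 1))
 *		trace_function(tr, data, ip, parent_ip, flags, pc);
 *	atomic_dec(&data->disabled);
 */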

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 *  Where "type" is the trace type that includes the trace_entry
 *  as the "ent" item. And "id" is the trace identifier that is
 *  used in the trace_type enum.
 *
 *  If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct special_entry, 0);		\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_boot, TRACE_BOOT);	\
		IF_ASSIGN(var, ent, struct ftrace_ret_entry, TRACE_FN_RET); \
		__ftrace_bad_type();					\
	} while (0)
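
/*
 * Usage sketch (illustrative): a print routine verifies and downcasts
 * the generic entry before touching type-specific fields; "field",
 * "entry", "s" and "sym_flags" are hypothetical locals:
 *
 *	struct ftrace_entry *field;
 *
 *	trace_assign_type(field, entry);
 *	seq_print_ip_sym(s, field->ip, sym_flags);
 */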

/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2	/* Relay to other output functions */
};

/*
 * A specific tracer, represented by methods that operate on a trace array:
 */
struct tracer {
	const char		*name;
	void			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	struct tracer		*next;
	int			print_max;
};
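
/*
 * Registration sketch (illustrative; the names are hypothetical):
 * a tracer fills in only the callbacks it supports and registers
 * itself, usually from an initcall in its own .c file:
 *
 *	static void my_tracer_init(struct trace_array *tr)
 *	{
 *		tracing_reset(tr, tr->cpu);
 *	}
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "mytracer",
 *		.init	= my_tracer_init,
 *	};
 *
 *	return register_tracer(&my_tracer);
 */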

struct trace_seq {
	unsigned char		buffer[PAGE_SIZE];
	unsigned int		len;
	unsigned int		readpos;
};

/*
 * Trace iterator - used by printout routines that present trace
 * results to users and that might sleep, etc.:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct tracer		*trace;
	void			*private;
	struct ring_buffer_iter	*buffer_iter[NR_CPUS];

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;
	struct trace_entry	*ent;
	int			cpu;
	u64			ts;

	unsigned long		iter_flags;
	loff_t			pos;
	long			idx;

	cpumask_t		started;
};

int tracing_is_enabled(void);
void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *tracing_init_dentry(void);
void init_tracer_sysprof_debugfs(struct dentry *d_tracer);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
						struct trace_array_cpu *data);
void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);

void ftrace(struct trace_array *tr,
			    struct trace_array_cpu *data,
			    unsigned long ip,
			    unsigned long parent_ip,
			    unsigned long flags, int pc);
void tracing_sched_switch_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags, int pc);
void tracing_record_cmdline(struct task_struct *tsk);

void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags, int pc);
void trace_special(struct trace_array *tr,
		   struct trace_array_cpu *data,
		   unsigned long arg1,
		   unsigned long arg2,
		   unsigned long arg3, int pc);
void trace_function(struct trace_array *tr,
		    struct trace_array_cpu *data,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void
trace_function_return(struct ftrace_retfunc *trace);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_max_latency;
extern unsigned long tracing_thresh;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);

extern cycle_t ftrace_now(int cpu);

#ifdef CONFIG_FUNCTION_TRACER
void tracing_start_function_trace(void);
void tracing_stop_function_trace(void);
#else
# define tracing_start_function_trace()		do { } while (0)
# define tracing_stop_function_trace()		do { } while (0)
#endif

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
typedef void
(*tracer_switch_func_t)(void *private,
			void *__rq,
			struct task_struct *prev,
			struct task_struct *next);

struct tracer_switch_ops {
	tracer_switch_func_t		func;
	void				*private;
	struct tracer_switch_ops	*next;
};

#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#endif

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_sysprof(struct tracer *trace,
					       struct trace_array *tr);
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
extern void trace_seq_print_cont(struct trace_seq *s,
				 struct trace_iterator *iter);

extern int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip,
		unsigned long sym_flags);
extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
				 size_t cnt);
extern long ns2usecs(cycle_t nsec);
extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args);

extern unsigned long trace_flags;

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_RET_TRACER
extern enum print_line_t print_return_function(struct trace_iterator *iter);
#else
static inline enum print_line_t
print_return_function(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that controls the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT		= 0x01,
	TRACE_ITER_SYM_OFFSET		= 0x02,
	TRACE_ITER_SYM_ADDR		= 0x04,
	TRACE_ITER_VERBOSE		= 0x08,
	TRACE_ITER_RAW			= 0x10,
	TRACE_ITER_HEX			= 0x20,
	TRACE_ITER_BIN			= 0x40,
	TRACE_ITER_BLOCK		= 0x80,
	TRACE_ITER_STACKTRACE		= 0x100,
	TRACE_ITER_SCHED_TREE		= 0x200,
	TRACE_ITER_PRINTK		= 0x400,
	TRACE_ITER_PREEMPTONLY		= 0x800,
};

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
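
/*
 * Example check (illustrative): output code tests these bits against
 * the global trace_flags, e.g.:
 *
 *	if (trace_flags & TRACE_ITER_SYM_ADDR)
 *		... also print the raw numeric address ...
 */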

extern struct tracer nop_trace;

/**
 * ftrace_preempt_disable - disable preemption scheduler safe
 *
 * When tracing can happen inside the scheduler, there exist
 * cases where the tracing might happen before the need_resched
 * flag is checked. If this happens and the tracer calls
 * preempt_enable (after a disable), a schedule might take place
 * causing an infinite recursion.
 *
 * To prevent this, we read the need_resched flag before
 * disabling preemption. When we want to enable preemption we
 * check the flag; if it is set, we call preempt_enable_no_resched.
 * Otherwise, we call preempt_enable.
 *
 * The rationale for doing the above is that if need_resched is set
 * and we have yet to reschedule, we are either in an atomic location
 * (where we do not need to check for scheduling) or we are inside
 * the scheduler and do not want to resched.
 */
static inline int ftrace_preempt_disable(void)
{
	int resched;

	resched = need_resched();
	preempt_disable_notrace();

	return resched;
}

/**
 * ftrace_preempt_enable - enable preemption scheduler safe
 * @resched: the return value from ftrace_preempt_disable
 *
 * This is a scheduler safe way to enable preemption and not miss
 * any preemption checks. The earlier disable saved the preemption state.
 * If resched is set, then we were either inside an atomic or
 * are inside the scheduler (we would have already scheduled
 * otherwise). In this case, we do not want to call normal
 * preempt_enable, but preempt_enable_no_resched instead.
 */
static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}
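
/*
 * Typical usage (a sketch of the pairing described above): wrap a
 * trace-recording operation that may run from within the scheduler:
 *
 *	int resched;
 *
 *	resched = ftrace_preempt_disable();
 *	... record the trace entry here ...
 *	ftrace_preempt_enable(resched);
 */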

#endif /* _LINUX_KERNEL_TRACE_H */