#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <asm/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/ftrace.h>

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_CONT,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_SPECIAL,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BOOT,

	__TRACE_LAST_TYPE
};

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned char		type;
	unsigned char		cpu;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
};
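
/*
 * Every specific record type below embeds a struct trace_entry as its
 * first member, so the common header can be read from any record
 * pointer before casting to the full type (see trace_assign_type()
 * further down). Illustrative sketch only:
 *
 *	if (ent->type == TRACE_FN) {
 *		struct ftrace_entry *field = (struct ftrace_entry *)ent;
 *		trace_seq_printf(s, "%lx <- %lx", field->ip, field->parent_ip);
 *	}
 */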

/*
 * Function trace entry - function address and parent function address:
 */
struct ftrace_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	unsigned long		parent_ip;
};
extern struct tracer boot_tracer;

/*
 * Context switch trace entry - which task (and prio) we switched from/to:
 */
struct ctx_switch_entry {
	struct trace_entry	ent;
	unsigned int		prev_pid;
	unsigned char		prev_prio;
	unsigned char		prev_state;
	unsigned int		next_pid;
	unsigned char		next_prio;
	unsigned char		next_state;
	unsigned int		next_cpu;
};

/*
 * Special (free-form) trace entry:
 */
struct special_entry {
	struct trace_entry	ent;
	unsigned long		arg1;
	unsigned long		arg2;
	unsigned long		arg3;
};

/*
 * Stack-trace entry:
 */

#define FTRACE_STACK_ENTRIES	8

struct stack_entry {
	struct trace_entry	ent;
	unsigned long		caller[FTRACE_STACK_ENTRIES];
};

/*
 * ftrace_printk entry:
 */
struct print_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	char			buf[];
};

#define TRACE_OLD_SIZE		88

struct trace_field_cont {
	unsigned char		type;
	/* Temporary till we get rid of this completely */
	char			buf[TRACE_OLD_SIZE - 1];
};

struct trace_mmiotrace_rw {
	struct trace_entry	ent;
	struct mmiotrace_rw	rw;
};

struct trace_mmiotrace_map {
	struct trace_entry	ent;
	struct mmiotrace_map	map;
};

struct trace_boot {
	struct trace_entry	ent;
	struct boot_trace	initcall;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF		- interrupts were disabled
 *  IRQS_NOSUPPORT 	- arch does not support irqs_disabled_flags
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ		- inside an interrupt handler
 *  SOFTIRQ		- inside a softirq handler
 *  CONT		- multiple entries hold the trace item
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_CONT			= 0x20,
};
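
/*
 * The flags byte of a trace_entry is a bitwise OR of the values
 * above. Decoding sketch (illustrative only):
 *
 *	int in_irq_ctx = entry->flags &
 *			 (TRACE_FLAG_HARDIRQ | TRACE_FLAG_SOFTIRQ);
 *	int irqs_off  = entry->flags & TRACE_FLAG_IRQS_OFF;
 */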

#define TRACE_BUF_SIZE		1024

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example, which task started
 * the trace, etc.)
 */
struct trace_array_cpu {
	atomic_t		disabled;

	/* these fields get copied into max-trace: */
	unsigned long		trace_idx;
	unsigned long		overrun;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	cycle_t			preempt_timestamp;
	pid_t			pid;
	uid_t			uid;
	char			comm[TASK_COMM_LEN];
};

struct trace_iterator;

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * Each trace array has on/off state as well:
 */
struct trace_array {
	struct ring_buffer	*buffer;
	unsigned long		entries;
	int			cpu;
	cycle_t			time_start;
	struct task_struct	*waiter;
	struct trace_array_cpu	*data[NR_CPUS];
};
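
/*
 * Tracers typically look up the per-CPU data of the current CPU and
 * check its ->disabled count before recording anything. A minimal
 * sketch of that pattern (illustrative; assumes preemption is already
 * disabled so the CPU cannot change under us):
 *
 *	int cpu = raw_smp_processor_id();
 *	struct trace_array_cpu *data = tr->data[cpu];
 *
 *	if (likely(!atomic_read(&data->disabled)))
 *		trace_function(tr, data, ip, parent_ip, flags, pc);
 */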

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 *  Where "type" is the trace type that includes the trace_entry
 *  as the "ent" item. And "id" is the trace identifier that is
 *  used in the trace_type enum.
 *
 *  If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct special_entry, 0);		\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_boot, TRACE_BOOT);	\
		__ftrace_bad_type();					\
	} while (0)
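
/*
 * Typical trace_assign_type() usage in an output routine (sketch):
 *
 *	struct ftrace_entry *field;
 *
 *	trace_assign_type(field, entry);
 *	trace_seq_printf(s, "%lx <-- %lx\n", field->ip, field->parent_ip);
 */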

/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2	/* Relay to other output functions */
};
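
/*
 * Hypothetical print_line callback sketch: render the events this
 * tracer understands into iter->seq and relay the rest:
 *
 *	static enum print_line_t example_print_line(struct trace_iterator *iter)
 *	{
 *		if (iter->ent->type != TRACE_FN)
 *			return TRACE_TYPE_UNHANDLED;
 *		if (!trace_seq_printf(&iter->seq, "fn event on cpu %d\n",
 *				      iter->cpu))
 *			return TRACE_TYPE_PARTIAL_LINE;
 *		return TRACE_TYPE_HANDLED;
 *	}
 */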

/*
 * A specific tracer, represented by methods that operate on a trace array:
 */
struct tracer {
	const char		*name;
	void			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	struct tracer		*next;
	int			print_max;
};
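
/*
 * Minimal tracer sketch (hypothetical): a simple tracer can get by
 * with just .name, .init and .reset, and is made selectable via
 * register_tracer(), declared below:
 *
 *	static void example_init(struct trace_array *tr) { }
 *	static void example_reset(struct trace_array *tr) { }
 *
 *	static struct tracer example_tracer __read_mostly = {
 *		.name	= "example",
 *		.init	= example_init,
 *		.reset	= example_reset,
 *	};
 *
 *	register_tracer(&example_tracer);
 */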

struct trace_seq {
	unsigned char		buffer[PAGE_SIZE];
	unsigned int		len;
	unsigned int		readpos;
};

/*
 * Trace iterator - used by printout routines that present trace
 * results to users; such routines might sleep, etc.:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct tracer		*trace;
	void			*private;
	struct ring_buffer_iter	*buffer_iter[NR_CPUS];

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;
	struct trace_entry	*ent;
	int			cpu;
	u64			ts;

	unsigned long		iter_flags;
	loff_t			pos;
	long			idx;

	cpumask_t		started;
};

int tracing_is_enabled(void);
void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *tracing_init_dentry(void);
void init_tracer_sysprof_debugfs(struct dentry *d_tracer);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
						struct trace_array_cpu *data);
void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);

void ftrace(struct trace_array *tr,
			    struct trace_array_cpu *data,
			    unsigned long ip,
			    unsigned long parent_ip,
			    unsigned long flags, int pc);
void tracing_sched_switch_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags, int pc);
void tracing_record_cmdline(struct task_struct *tsk);

void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags, int pc);
void trace_special(struct trace_array *tr,
		   struct trace_array_cpu *data,
		   unsigned long arg1,
		   unsigned long arg2,
		   unsigned long arg3, int pc);
void trace_function(struct trace_array *tr,
		    struct trace_array_cpu *data,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_max_latency;
extern unsigned long tracing_thresh;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);

extern cycle_t ftrace_now(int cpu);

#ifdef CONFIG_FUNCTION_TRACER
void tracing_start_function_trace(void);
void tracing_stop_function_trace(void);
#else
# define tracing_start_function_trace()		do { } while (0)
# define tracing_stop_function_trace()		do { } while (0)
#endif

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
typedef void
(*tracer_switch_func_t)(void *private,
			void *__rq,
			struct task_struct *prev,
			struct task_struct *next);

struct tracer_switch_ops {
	tracer_switch_func_t		func;
	void				*private;
	struct tracer_switch_ops	*next;
};

#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#endif

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_sysprof(struct tracer *trace,
					       struct trace_array *tr);
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
extern void trace_seq_print_cont(struct trace_seq *s,
				 struct trace_iterator *iter);
extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
				 size_t cnt);
extern long ns2usecs(cycle_t nsec);
extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args);

extern unsigned long trace_flags;

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that controls the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT		= 0x01,
	TRACE_ITER_SYM_OFFSET		= 0x02,
	TRACE_ITER_SYM_ADDR		= 0x04,
	TRACE_ITER_VERBOSE		= 0x08,
	TRACE_ITER_RAW			= 0x10,
	TRACE_ITER_HEX			= 0x20,
	TRACE_ITER_BIN			= 0x40,
	TRACE_ITER_BLOCK		= 0x80,
	TRACE_ITER_STACKTRACE		= 0x100,
	TRACE_ITER_SCHED_TREE		= 0x200,
	TRACE_ITER_PRINTK		= 0x400,
	TRACE_ITER_PREEMPTONLY		= 0x800,
};
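
/*
 * Output code tests these bits against the global trace_flags word,
 * e.g. (illustrative only):
 *
 *	if (trace_flags & TRACE_ITER_SYM_ADDR)
 *		trace_seq_printf(s, " <%lx>", ip);
 */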

extern struct tracer nop_trace;

/**
 * ftrace_preempt_disable - disable preemption scheduler safe
 *
 * When tracing can happen inside the scheduler, there are
 * cases where the tracing might happen before the need_resched
 * flag is checked. If this happens and the tracer calls
 * preempt_enable (after a disable), a schedule might take place
 * causing an infinite recursion.
 *
 * To prevent this, we read the need_resched flag before
 * disabling preemption. When we want to enable preemption we
 * check the flag; if it is set, we call preempt_enable_no_resched.
 * Otherwise, we call preempt_enable.
 *
 * The rationale for doing the above is that if need_resched is set
 * and we have yet to reschedule, we are either in an atomic location
 * (where we do not need to check for scheduling) or we are inside
 * the scheduler and do not want to resched.
 */
static inline int ftrace_preempt_disable(void)
{
	int resched;

	resched = need_resched();
	preempt_disable_notrace();

	return resched;
}

/**
 * ftrace_preempt_enable - enable preemption scheduler safe
 * @resched: the return value from ftrace_preempt_disable
 *
 * This is a scheduler safe way to enable preemption and not miss
 * any preemption checks. ftrace_preempt_disable() saved the state
 * of preemption in @resched. If @resched is set, then we were either
 * inside an atomic section or inside the scheduler (we would have
 * already scheduled otherwise). In this case, we do not want to call
 * the normal preempt_enable, but preempt_enable_no_resched instead.
 */
static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}
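
/*
 * Typical usage from a tracing callback (sketch):
 *
 *	int resched = ftrace_preempt_disable();
 *
 *	... record the event; a preempt_enable here cannot recurse ...
 *
 *	ftrace_preempt_enable(resched);
 */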

#endif /* _LINUX_KERNEL_TRACE_H */