/*
 * Ftrace header.  For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.txt
 */

#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/trace_clock.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/ftrace.h>

#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos);

/* Signature of an mcount-time callback; ip is the traced call site. */
typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);

struct ftrace_hash;

34
struct ftrace_ops {
35 36 37 38 39 40
	ftrace_func_t			func;
	struct ftrace_ops		*next;
#ifdef CONFIG_DYNAMIC_FTRACE
	struct ftrace_hash		*notrace_hash;
	struct ftrace_hash		*filter_hash;
#endif
41 42
};

extern int function_trace_stop;

/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
	FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
	FTRACE_TYPE_RETURN,	/* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;

/**
 * ftrace_stop - stop function tracer.
 *
 * A quick way to stop the function tracer. Note this is an on/off switch,
 * it is not something that is recursive like preempt_disable.
 * This does not disable the calling of mcount, it only stops the
 * calling of functions from mcount.
 */
static inline void ftrace_stop(void)
{
	function_trace_stop = 1;
}

/**
 * ftrace_start - start the function tracer.
 *
 * This function is the inverse of ftrace_stop. This does not enable
 * the function tracing if the function tracer is disabled. This only
 * sets the function tracer flag to continue calling the functions
 * from mcount.
 */
static inline void ftrace_start(void)
{
	function_trace_stop = 0;
}

/*
 * The ftrace_ops must be a static and should also
 * be read_mostly.  These functions do modify read_mostly variables
 * so use them sparingly. Never free an ftrace_op or modify the
 * next pointer after it has been registered. Even after unregistering
 * it, the next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
void clear_ftrace_function(void);

/* Default no-op callback installed when tracing is off. */
extern void ftrace_stub(unsigned long a0, unsigned long a1);

#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline void clear_ftrace_function(void) { }
static inline void ftrace_kill(void) { }
static inline void ftrace_stop(void) { }
static inline void ftrace_start(void) { }
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_STACK_TRACER
extern int stack_tracer_enabled;
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos);
#endif

116 117 118 119 120 121 122
struct ftrace_func_command {
	struct list_head	list;
	char			*name;
	int			(*func)(char *func, char *cmd,
					char *params, int enable);
};

#ifdef CONFIG_DYNAMIC_FTRACE

/* Arch hooks run before/after live patching of mcount call sites. */
int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);

struct seq_file;
struct ftrace_probe_ops {
	void			(*func)(unsigned long ip,
					unsigned long parent_ip,
					void **data);
	int			(*callback)(unsigned long ip, void **data);
	void			(*free)(void **data);
	int			(*print)(struct seq_file *m,
					 unsigned long ip,
					 struct ftrace_probe_ops *ops,
					 void *data);
};

extern int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
			      void *data);
extern void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				void *data);
extern void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
extern void unregister_ftrace_function_probe_all(char *glob);

extern int ftrace_text_reserved(void *start, void *end);

/* dyn_ftrace record state flags */
enum {
	FTRACE_FL_FREE		= (1 << 0),
	FTRACE_FL_ENABLED	= (1 << 1),
};

159
struct dyn_ftrace {
160 161 162 163 164 165 166 167 168
	union {
		unsigned long		ip; /* address of mcount call-site */
		struct dyn_ftrace	*freelist;
	};
	union {
		unsigned long		flags;
		struct dyn_ftrace	*newlist;
	};
	struct dyn_arch_ftrace		arch;
169 170
};

int ftrace_force_update(void);
void ftrace_set_filter(unsigned char *buf, int len, int reset);

int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void *data);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_call(void);
extern void mcount_call(void);

#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif

/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the mcount call site record
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
			   struct dyn_ftrace *rec, unsigned long addr);

/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the mcount call site record
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);

/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
250
#else
251 252 253 254 255 256 257
static inline int skip_trace(unsigned long ip) { return 0; }
static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
}
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
258
static inline void ftrace_release_mod(struct module *mod) {}
259 260
static inline int register_ftrace_command(struct ftrace_func_command *cmd)
{
261
	return -EINVAL;
262 263 264
}
static inline int unregister_ftrace_command(char *cmd_name)
{
265
	return -EINVAL;
266
}
267 268 269 270
static inline int ftrace_text_reserved(void *start, void *end)
{
	return 0;
}
A
Abhishek Sagar 已提交
271
#endif /* CONFIG_DYNAMIC_FTRACE */

/* totally disable ftrace - can not re-enable after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = 0;
#endif
}

/*
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	int saved_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;
	return saved_ftrace_enabled;
#else
	return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = enabled;
#endif
}

#ifndef HAVE_ARCH_CALLER_ADDR
# ifdef CONFIG_FRAME_POINTER
#  define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
#  define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
#  define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
#  define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
#  define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
#  define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
#  define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
# else
/* Without frame pointers only the immediate caller is reliable. */
#  define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
#  define CALLER_ADDR1 0UL
#  define CALLER_ADDR2 0UL
#  define CALLER_ADDR3 0UL
#  define CALLER_ADDR4 0UL
#  define CALLER_ADDR5 0UL
#  define CALLER_ADDR6 0UL
# endif
#endif /* ifndef HAVE_ARCH_CALLER_ADDR */

#ifdef CONFIG_IRQSOFF_TRACER
  extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
  extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
#else
  static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
  static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
#endif

#ifdef CONFIG_PREEMPT_TRACER
  extern void trace_preempt_on(unsigned long a0, unsigned long a1);
  extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
  static inline void trace_preempt_on(unsigned long a0, unsigned long a1) { }
  static inline void trace_preempt_off(unsigned long a0, unsigned long a1) { }
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#else
static inline void ftrace_init(void) { }
#endif

/*
 * Structure that defines an entry function trace.
 */
struct ftrace_graph_ent {
	unsigned long func; /* Current function */
	int depth;
};

/*
 * Structure that defines a return function trace.
 */
struct ftrace_graph_ret {
	unsigned long func; /* Current function */
	unsigned long long calltime;
	unsigned long long rettime;
	/* Number of functions that overran the depth limit for current task */
	unsigned long overrun;
	int depth;
};

/* Type of the callback handlers for tracing function graph */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* for init task */
#define INIT_FTRACE_GRAPH		.ret_stack = NULL,

/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
	unsigned long ret;
	unsigned long func;
	unsigned long long calltime;
	unsigned long long subtime;
	unsigned long fp;
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

extern int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer);

/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer but we still want it traced by the usual function
 * tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph		notrace

/*
 * We want to know which function is an entrypoint of a hardirq.
 * That will help us to put a signal on output.
 */
#define __irq_entry		 __attribute__((__section__(".irqentry.text")))

/* Limits of hardirq entrypoints */
extern char __irqentry_text_start[];
extern char __irqentry_text_end[];

#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32
420 421 422
extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
				trace_func_graph_ent_t entryfunc);

S
Steven Rostedt 已提交
423 424
extern void ftrace_graph_stop(void);

425 426 427
/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;
428

429
extern void unregister_ftrace_graph(void);
430

431 432
extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
433
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
434 435 436 437 438

static inline int task_curr_ret_stack(struct task_struct *t)
{
	return t->curr_ret_stack;
}
439 440 441 442 443 444 445 446 447 448

static inline void pause_graph_tracing(void)
{
	atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
	atomic_dec(&current->tracing_graph_pause);
}
449
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */
450 451

#define __notrace_funcgraph
452
#define __irq_entry
453
#define INIT_FTRACE_GRAPH
454

455 456
static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
457
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
458

459 460 461 462 463 464 465
static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			  trace_func_graph_ent_t entryfunc)
{
	return -1;
}
static inline void unregister_ftrace_graph(void) { }

466 467 468 469
static inline int task_curr_ret_stack(struct task_struct *tsk)
{
	return -1;
}
470 471 472

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
473
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_TRACING

/* flags for current->trace */
enum {
	TSK_TRACE_FL_TRACE_BIT	= 0,
	TSK_TRACE_FL_GRAPH_BIT	= 1,
};
enum {
	TSK_TRACE_FL_TRACE	= 1 << TSK_TRACE_FL_TRACE_BIT,
	TSK_TRACE_FL_GRAPH	= 1 << TSK_TRACE_FL_GRAPH_BIT,
};

static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_TRACE;
}

static inline void set_tsk_trace_graph(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_graph(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline int test_tsk_trace_graph(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_GRAPH;
}

enum ftrace_dump_mode;

extern enum ftrace_dump_mode ftrace_dump_on_oops;

#ifdef CONFIG_PREEMPT
#define INIT_TRACE_RECURSION		.trace_recursion = 0,
#endif

#endif /* CONFIG_TRACING */

#ifndef INIT_TRACE_RECURSION
#define INIT_TRACE_RECURSION
#endif

#ifdef CONFIG_FTRACE_SYSCALLS

unsigned long arch_syscall_addr(int nr);

#endif /* CONFIG_FTRACE_SYSCALLS */

#endif /* _LINUX_FTRACE_H */