/*
 * Ftrace header.  For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.txt
 */

#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/trace_clock.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/ftrace.h>

/*
 * If the arch supports passing the variable contents of
 * function_trace_op as the third parameter back from the
 * mcount call, then the arch should define this as 1.
 */
#ifndef ARCH_SUPPORTS_FTRACE_OPS
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif
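
/*
 * For example, an arch whose mcount/fentry trampoline does pass the
 * current function_trace_op through to the callback would advertise
 * it like this in its asm/ftrace.h (a sketch, not tied to any one arch):
 *
 *	#define ARCH_SUPPORTS_FTRACE_OPS 1
 */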

/*
 * If the arch's mcount caller does not support all of ftrace's
 * features, then it must call an indirect function that
 * does. Or at least does enough to prevent any unwelcome side effects.
 */
#if !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
#else
# define FTRACE_FORCE_LIST_FUNC 0
#endif

/* Main tracing buffer and events set up */
#ifdef CONFIG_TRACING
void trace_init(void);
#else
static inline void trace_init(void) { }
#endif

struct module;
struct ftrace_hash;

#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos);

struct ftrace_ops;

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *op, struct pt_regs *regs);

ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);

/*
 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
 * set in the flags member.
 * PER_CPU, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and
 * IPMODIFY are a kind of attribute flags which can be set only before
 * registering the ftrace_ops, and can not be modified while registered.
 * Changing those attribute flags after registering the ftrace_ops will
 * cause unexpected results.
 *
 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
 *           allocated ftrace_ops which need special care
 * PER_CPU - set manually by the ftrace_ops user to denote that the
 *           ftrace_ops can be controlled by the following calls:
 *             ftrace_function_local_enable
 *             ftrace_function_local_disable
 * SAVE_REGS - The ftrace_ops wants regs saved at each function called
 *            and passed to the callback. If this flag is set, but the
 *            architecture does not support passing regs
 *            (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
 *            ftrace_ops will fail to register, unless the next flag
 *            is set.
 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
 *            handler can handle an arch that does not save regs
 *            (the handler tests if regs == NULL), then it can set
 *            this flag instead. It will not fail registering the ftrace_ops,
 *            but the regs field will be NULL if the arch does not support
 *            passing regs to the handler.
 *            Note, if this flag is set, the SAVE_REGS flag will automatically
 *            get set upon registering the ftrace_ops, if the arch supports it.
 * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
 *            that the callback has its own recursion protection. If it does
 *            not set this, then the ftrace infrastructure will add recursion
 *            protection for the caller.
 * STUB   - The ftrace_ops is just a placeholder.
 * INITIALIZED - The ftrace_ops has already been initialized (the first time
 *            register_ftrace_function() is called on the ops, it initializes them)
 * DELETED - The ops are being deleted, do not let them be registered again.
 * ADDING  - The ops is in the process of being added.
 * REMOVING - The ops is in the process of being removed.
 * MODIFYING - The ops is in the process of changing its filter functions.
 * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
 *            The arch specific code sets this flag when it allocated a
 *            trampoline. This lets the arch know that it can update the
 *            trampoline in case the callback function changes.
 *            The ftrace_ops trampoline can be set by the ftrace users, and
 *            in such cases the arch must not modify it. Only the arch ftrace
 *            core code should set this flag.
 * IPMODIFY - The ops can modify the IP register. This can only be set with
 *            SAVE_REGS. If another ops with this flag set is already registered
 *            for any of the functions that this ops will be registered for, then
 *            this ops will fail to register or set_filter_ip.
 * PID     - Is affected by set_ftrace_pid (allows filtering on those pids)
 * RCU     - Set when the ops can only be called when RCU is watching.
 */
enum {
	FTRACE_OPS_FL_ENABLED			= 1 << 0,
	FTRACE_OPS_FL_DYNAMIC			= 1 << 1,
	FTRACE_OPS_FL_PER_CPU			= 1 << 2,
	FTRACE_OPS_FL_SAVE_REGS			= 1 << 3,
	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= 1 << 4,
	FTRACE_OPS_FL_RECURSION_SAFE		= 1 << 5,
	FTRACE_OPS_FL_STUB			= 1 << 6,
	FTRACE_OPS_FL_INITIALIZED		= 1 << 7,
	FTRACE_OPS_FL_DELETED			= 1 << 8,
	FTRACE_OPS_FL_ADDING			= 1 << 9,
	FTRACE_OPS_FL_REMOVING			= 1 << 10,
	FTRACE_OPS_FL_MODIFYING			= 1 << 11,
	FTRACE_OPS_FL_ALLOC_TRAMP		= 1 << 12,
	FTRACE_OPS_FL_IPMODIFY			= 1 << 13,
	FTRACE_OPS_FL_PID			= 1 << 14,
	FTRACE_OPS_FL_RCU			= 1 << 15,
};

#ifdef CONFIG_DYNAMIC_FTRACE
/* The hash used to know what functions callbacks trace */
struct ftrace_ops_hash {
	struct ftrace_hash		*notrace_hash;
	struct ftrace_hash		*filter_hash;
	struct mutex			regex_lock;
};
#endif

/*
 * Note, ftrace_ops can be referenced outside of RCU protection, unless
 * the RCU flag is set. If ftrace_ops is allocated and not part of kernel
 * core data, unregistering it will schedule work on all CPUs to make
 * sure that there are no more users. Depending on the load of the
 * system that may take a bit of time.
 *
 * Any private data added must also take care not to be freed, and if
 * private data is added to a ftrace_ops that is in core code, the user
 * of the ftrace_ops must perform a schedule_on_each_cpu() before freeing it.
 */
struct ftrace_ops {
	ftrace_func_t			func;
	struct ftrace_ops		*next;
	unsigned long			flags;
	void				*private;
	ftrace_func_t			saved_func;
	int __percpu			*disabled;
#ifdef CONFIG_DYNAMIC_FTRACE
	int				nr_trampolines;
	struct ftrace_ops_hash		local_hash;
	struct ftrace_ops_hash		*func_hash;
	struct ftrace_ops_hash		old_hash;
	unsigned long			trampoline;
	unsigned long			trampoline_size;
#endif
};

/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
	FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
	FTRACE_TYPE_RETURN,	/* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;

/*
 * The ftrace_ops must be static and should also
 * be read_mostly.  These functions do modify read_mostly variables
 * so use them sparingly. Never free an ftrace_ops or modify the
 * next pointer after it has been registered. Even after unregistering
 * it, the next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
void clear_ftrace_function(void);

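/*
 * A minimal registration sketch (hypothetical names; error handling
 * elided). The callback can run in almost any context, so it must be
 * extremely careful about what it touches:
 *
 *	static void my_trace_func(unsigned long ip, unsigned long parent_ip,
 *				  struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		// ip is the traced function, parent_ip its call site
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func	= my_trace_func,
 *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */
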
/**
 * ftrace_function_local_enable - enable ftrace_ops on current cpu
 *
 * This function enables tracing on the current cpu by decreasing
 * the per-cpu control variable.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
{
	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
		return;

	(*this_cpu_ptr(ops->disabled))--;
}

/**
 * ftrace_function_local_disable - disable ftrace_ops on current cpu
 *
 * This function disables tracing on the current cpu by increasing
 * the per-cpu control variable.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
{
	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
		return;

	(*this_cpu_ptr(ops->disabled))++;
}

/**
 * ftrace_function_local_disabled - returns ftrace_ops disabled value
 *                                  on current cpu
 *
 * This function returns the value of ftrace_ops::disabled on the current
 * cpu. It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
{
	WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU));
	return *this_cpu_ptr(ops->disabled);
}

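/*
 * A sketch of how the three helpers above pair up for an ops that was
 * registered with FTRACE_OPS_FL_PER_CPU (hypothetical my_ops; the
 * ->disabled counter is per cpu, so preemption must stay off):
 *
 *	preempt_disable();
 *	ftrace_function_local_enable(&my_ops);	// counter--, tracing on
 *	...
 *	if (!ftrace_function_local_disabled(&my_ops))
 *		;	// callbacks are currently allowed on this cpu
 *	ftrace_function_local_disable(&my_ops);	// counter++, tracing off
 *	preempt_enable();
 */
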
extern void ftrace_stub(unsigned long a0, unsigned long a1,
			struct ftrace_ops *op, struct pt_regs *regs);

#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline int ftrace_nr_registered_ops(void)
{
	return 0;
}
static inline void clear_ftrace_function(void) { }
static inline void ftrace_kill(void) { }
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_STACK_TRACER

#define STACK_TRACE_ENTRIES 500

struct stack_trace;

extern unsigned stack_trace_index[];
extern struct stack_trace stack_trace_max;
extern unsigned long stack_trace_max_size;
extern arch_spinlock_t stack_trace_max_lock;

extern int stack_tracer_enabled;
void stack_trace_print(void);
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos);
#endif

struct ftrace_func_command {
	struct list_head	list;
	char			*name;
	int			(*func)(struct ftrace_hash *hash,
					char *func, char *cmd,
					char *params, int enable);
};

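/*
 * A sketch of a custom set_ftrace_filter command (hypothetical names).
 * Once registered, writing "<function>:mycmd:<params>" to
 * set_ftrace_filter invokes the handler:
 *
 *	static int my_cmd_func(struct ftrace_hash *hash, char *func,
 *			       char *cmd, char *params, int enable)
 *	{
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command my_cmd = {
 *		.name	= "mycmd",
 *		.func	= my_cmd_func,
 *	};
 *
 *	register_ftrace_command(&my_cmd);
 */
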
#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);

struct dyn_ftrace;

enum ftrace_bug_type {
	FTRACE_BUG_UNKNOWN,
	FTRACE_BUG_INIT,
	FTRACE_BUG_NOP,
	FTRACE_BUG_CALL,
	FTRACE_BUG_UPDATE,
};
extern enum ftrace_bug_type ftrace_bug_type;

/*
 * Archs can set this to point to a variable that holds the value that was
 * expected at the call site before calling ftrace_bug().
 */
extern const void *ftrace_expected;

void ftrace_bug(int err, struct dyn_ftrace *rec);

struct seq_file;

struct ftrace_probe_ops {
	void			(*func)(unsigned long ip,
					unsigned long parent_ip,
					void **data);
	int			(*init)(struct ftrace_probe_ops *ops,
					unsigned long ip, void **data);
	void			(*free)(struct ftrace_probe_ops *ops,
					unsigned long ip, void **data);
	int			(*print)(struct seq_file *m,
					 unsigned long ip,
					 struct ftrace_probe_ops *ops,
					 void *data);
};

extern int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
			      void *data);
extern void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				void *data);
extern void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
extern void unregister_ftrace_function_probe_all(char *glob);

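/*
 * A sketch of attaching a probe to every function matching a glob
 * (hypothetical handler; the data pointer is handed back on every hit):
 *
 *	static void my_probe(unsigned long ip, unsigned long parent_ip,
 *			     void **data)
 *	{
 *		// runs each time a matched function is entered
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func = my_probe,
 *	};
 *
 *	register_ftrace_function_probe("vfs_*", &my_probe_ops, NULL);
 *	...
 *	unregister_ftrace_function_probe_func("vfs_*", &my_probe_ops);
 */
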
extern int ftrace_text_reserved(const void *start, const void *end);

extern int ftrace_nr_registered_ops(void);

bool is_ftrace_trampoline(unsigned long addr);

/*
 * The dyn_ftrace record's flags field is split into two parts.
 * The first part, ranging from 0 to FTRACE_REF_MAX, is a counter of
 * the number of callbacks that have registered the function that
 * the dyn_ftrace descriptor represents.
 *
 * The second part is a mask:
 *  ENABLED - the function is being traced
 *  REGS    - the record wants the function to save regs
 *  REGS_EN - the function is set up to save regs.
 *  IPMODIFY - the record allows for the IP address to be changed.
 *
 * When a new ftrace_ops is registered and wants a function to save
 * pt_regs, the rec->flags REGS bit is set. When the function has been
 * set up to save regs, the REGS_EN flag is set. Once a function
 * starts saving regs it will do so until all ftrace_ops are removed
 * from tracing that function.
 */
enum {
	FTRACE_FL_ENABLED	= (1UL << 31),
	FTRACE_FL_REGS		= (1UL << 30),
	FTRACE_FL_REGS_EN	= (1UL << 29),
	FTRACE_FL_TRAMP		= (1UL << 28),
	FTRACE_FL_TRAMP_EN	= (1UL << 27),
	FTRACE_FL_IPMODIFY	= (1UL << 26),
};

#define FTRACE_REF_MAX_SHIFT	26
#define FTRACE_FL_BITS		6
#define FTRACE_FL_MASKED_BITS	((1UL << FTRACE_FL_BITS) - 1)
#define FTRACE_FL_MASK		(FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
#define FTRACE_REF_MAX		((1UL << FTRACE_REF_MAX_SHIFT) - 1)

#define ftrace_rec_count(rec)	((rec)->flags & ~FTRACE_FL_MASK)

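/*
 * Worked out with the values above: FTRACE_FL_MASK is 0xfc000000 (the
 * six flag bits, 26-31) and FTRACE_REF_MAX is 0x03ffffff. So a record
 * whose flags read 0x80000003 is ENABLED and has a reference count of
 * ftrace_rec_count(rec) == 3.
 */
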
struct dyn_ftrace {
	unsigned long		ip; /* address of mcount call-site */
	unsigned long		flags;
	struct dyn_arch_ftrace	arch;
};

int ftrace_force_update(void);
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
			 int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
			int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);

int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);

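/*
 * A sketch of narrowing an ops to chosen functions before registering
 * it (hypothetical my_ops; the buffer is parsed like set_ftrace_filter
 * input, and a non-zero reset clears any earlier filter):
 *
 *	unsigned char buf[] = "kmalloc";
 *
 *	ftrace_set_filter(&my_ops, buf, strlen(buf), 1);
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 *	ftrace_free_filter(&my_ops);
 */
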
enum {
	FTRACE_UPDATE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_START_FUNC_RET		= (1 << 3),
	FTRACE_STOP_FUNC_RET		= (1 << 4),
};

/*
 * The FTRACE_UPDATE_* enum is used to pass information back
 * from the ftrace_update_record() and ftrace_test_record()
 * functions. These are called by the code update routines
 * to find out what is to be done for a given function.
 *
 *  IGNORE           - The function is already what we want it to be
 *  MAKE_CALL        - Start tracing the function
 *  MODIFY_CALL      - The function is being traced, but needs to call
 *                     a different trampoline (e.g. to start or stop
 *                     saving regs)
 *  MAKE_NOP         - Stop tracing the function
 */
enum {
	FTRACE_UPDATE_IGNORE,
	FTRACE_UPDATE_MAKE_CALL,
	FTRACE_UPDATE_MODIFY_CALL,
	FTRACE_UPDATE_MAKE_NOP,
};

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_NOTRACE	= (1 << 1),
	FTRACE_ITER_PRINTALL	= (1 << 2),
	FTRACE_ITER_DO_HASH	= (1 << 3),
	FTRACE_ITER_HASH	= (1 << 4),
	FTRACE_ITER_ENABLED	= (1 << 5),
};

void arch_ftrace_update_code(int command);

struct ftrace_rec_iter;

struct ftrace_rec_iter *ftrace_rec_iter_start(void);
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);

#define for_ftrace_rec_iter(iter)		\
	for (iter = ftrace_rec_iter_start();	\
	     iter;				\
	     iter = ftrace_rec_iter_next(iter))


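/*
 * A sketch of walking every mcount record with the iterator above
 * (meant for code-patching paths where the ftrace lock is already
 * held, e.g. inside arch_ftrace_update_code()):
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for_ftrace_rec_iter(iter) {
 *		rec = ftrace_rec_iter_record(iter);
 *		// rec->ip is the call site to inspect or patch
 *	}
 */
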
int ftrace_update_record(struct dyn_ftrace *rec, int enable);
int ftrace_test_record(struct dyn_ftrace *rec, int enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);

extern ftrace_func_t ftrace_trace_function;

int ftrace_regex_open(struct ftrace_ops *ops, int flag,
		  struct inode *inode, struct file *file);
ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
			    size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos);
int ftrace_regex_release(struct inode *inode, struct file *file);

void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_regs_caller(void);
extern void ftrace_call(void);
extern void ftrace_regs_call(void);
extern void mcount_call(void);

void ftrace_modify_all_code(int command);

#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif

#ifndef FTRACE_GRAPH_ADDR
#define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
#endif

#ifndef FTRACE_REGS_ADDR
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
#else
# define FTRACE_REGS_ADDR FTRACE_ADDR
#endif
#endif

/*
 * If an arch would like functions that are only traced
 * by the function graph tracer to jump directly to its own
 * trampoline, then it can define FTRACE_GRAPH_TRAMP_ADDR
 * to be that address to jump to.
 */
#ifndef FTRACE_GRAPH_TRAMP_ADDR
#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif

/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the mcount call site record
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
			   struct dyn_ftrace *rec, unsigned long addr);
/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the mcount call site record
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/**
 * ftrace_modify_call - convert from one addr to another (no nop)
 * @rec: the mcount call site record
 * @old_addr: the address expected to be currently called to
 * @addr: the address to change to
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @old_addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			      unsigned long addr);
#else
/* Should never be called */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				     unsigned long addr)
{
	return -EINVAL;
}
#endif

/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_release_mod(struct module *mod) {}
static inline void ftrace_module_init(struct module *mod) {}
static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
{
	return -EINVAL;
}
static inline __init int unregister_ftrace_command(char *cmd_name)
{
	return -EINVAL;
}
static inline int ftrace_text_reserved(const void *start, const void *end)
{
	return 0;
}
static inline unsigned long ftrace_location(unsigned long ip)
{
	return 0;
}

/*
 * Again, users of functions that take an ftrace_ops may not
 * have them defined when ftrace is not enabled, but these
 * functions may still be called. Use a macro instead of an inline
 * so the ops argument is not evaluated.
 */
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)

static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
			    size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }

static inline bool is_ftrace_trampoline(unsigned long addr)
{
	return false;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

/* totally disable ftrace - can not re-enable after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = 0;
#endif
}

/*
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	int saved_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;
	return saved_ftrace_enabled;
#else
	return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = enabled;
#endif
}

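/*
 * The intended pairing (the caller supplies the synchronization noted
 * above):
 *
 *	int saved = __ftrace_enabled_save();
 *	// ... code that must run with function tracing off ...
 *	__ftrace_enabled_restore(saved);
 */
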
/* All archs should have this, but we define it for consistency */
#ifndef ftrace_return_address0
# define ftrace_return_address0 __builtin_return_address(0)
#endif

/* Archs may use other ways for ADDR1 and beyond */
#ifndef ftrace_return_address
# ifdef CONFIG_FRAME_POINTER
#  define ftrace_return_address(n) __builtin_return_address(n)
# else
#  define ftrace_return_address(n) 0UL
# endif
#endif

#define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
#define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
#define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))

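/*
 * For example, a probe can record who called it and that caller's
 * caller (without frame pointers, CALLER_ADDR1 and beyond evaluate
 * to 0UL):
 *
 *	pr_debug("entered via %pS <- %pS\n",
 *		 (void *)CALLER_ADDR0, (void *)CALLER_ADDR1);
 */
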
#ifdef CONFIG_IRQSOFF_TRACER
  extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
  extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
#else
  static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
  static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
#endif

#ifdef CONFIG_PREEMPT_TRACER
  extern void trace_preempt_on(unsigned long a0, unsigned long a1);
  extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
/*
 * Use defines instead of static inlines because some arches will make code out
 * of the CALLER_ADDR, when we really want these to be a real nop.
 */
# define trace_preempt_on(a0, a1) do { } while (0)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#else
static inline void ftrace_init(void) { }
#endif

/*
 * Structure that defines an entry function trace.
 */
struct ftrace_graph_ent {
	unsigned long func; /* Current function */
	int depth;
};

/*
 * Structure that defines a return function trace.
 */
struct ftrace_graph_ret {
	unsigned long func; /* Current function */
	unsigned long long calltime;
	unsigned long long rettime;
	/* Number of functions that overran the depth limit for current task */
	unsigned long overrun;
	int depth;
};

/* Type of the callback handlers for tracing function graph */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* for init task */
#define INIT_FTRACE_GRAPH		.ret_stack = NULL,

/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct task_struct
 */
struct ftrace_ret_stack {
	unsigned long ret;
	unsigned long func;
	unsigned long long calltime;
	unsigned long long subtime;
	unsigned long fp;
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

extern int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer);

/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer but we still want it to be traced by the normal
 * function tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph		notrace

/*
 * We want to know which function is an entrypoint of a hardirq.
 * That will help us to annotate hardirq entries in the trace output.
 */
#define __irq_entry		 __attribute__((__section__(".irqentry.text")))

/* Limits of hardirq entrypoints */
extern char __irqentry_text_start[];
extern char __irqentry_text_end[];

#define FTRACE_NOTRACE_DEPTH 65536
#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32
extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
				trace_func_graph_ent_t entryfunc);

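/*
 * A minimal sketch of hooking the graph tracer (hypothetical handlers;
 * only one pair may be registered at a time):
 *
 *	static int my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	// non-zero: trace this function
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace)
 *	{
 *		// trace->rettime - trace->calltime is the time spent
 *	}
 *
 *	register_ftrace_graph(my_return, my_entry);
 *	...
 *	unregister_ftrace_graph();
 */
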
extern bool ftrace_graph_is_dead(void);
extern void ftrace_graph_stop(void);

/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;

extern void unregister_ftrace_graph(void);

extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);

static inline int task_curr_ret_stack(struct task_struct *t)
{
	return t->curr_ret_stack;
}

static inline void pause_graph_tracing(void)
{
	atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
	atomic_dec(&current->tracing_graph_pause);
}
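
/*
 * A sketch of shielding a region from graph tracing for the current
 * task (the pause count nests, so pairs may be stacked):
 *
 *	pause_graph_tracing();
 *	// ... calls here produce no graph trace for this task ...
 *	unpause_graph_tracing();
 */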
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */

#define __notrace_funcgraph
#define __irq_entry
#define INIT_FTRACE_GRAPH

static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }

static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			  trace_func_graph_ent_t entryfunc)
{
	return -1;
}
static inline void unregister_ftrace_graph(void) { }

858 859 860 861
static inline int task_curr_ret_stack(struct task_struct *tsk)
{
	return -1;
}
862 863 864

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
865
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
866

#ifdef CONFIG_TRACING

/* flags for current->trace */
enum {
	TSK_TRACE_FL_TRACE_BIT	= 0,
	TSK_TRACE_FL_GRAPH_BIT	= 1,
};
enum {
	TSK_TRACE_FL_TRACE	= 1 << TSK_TRACE_FL_TRACE_BIT,
	TSK_TRACE_FL_GRAPH	= 1 << TSK_TRACE_FL_GRAPH_BIT,
};

static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_TRACE;
}

static inline void set_tsk_trace_graph(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_graph(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline int test_tsk_trace_graph(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_GRAPH;
}

enum ftrace_dump_mode;

extern enum ftrace_dump_mode ftrace_dump_on_oops;
extern int tracepoint_printk;

extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;

#ifdef CONFIG_PREEMPT
#define INIT_TRACE_RECURSION		.trace_recursion = 0,
#endif
#else /* CONFIG_TRACING */
static inline void  disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */

#ifndef INIT_TRACE_RECURSION
#define INIT_TRACE_RECURSION
#endif

#ifdef CONFIG_FTRACE_SYSCALLS

unsigned long arch_syscall_addr(int nr);

#endif /* CONFIG_FTRACE_SYSCALLS */

#endif /* _LINUX_FTRACE_H */