/*
 * Ftrace header.  For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.txt
 */

#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/trace_clock.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/ftrace.h>

/*
 * If the arch supports passing the variable contents of
 * function_trace_op as the third parameter back from the
 * mcount call, then the arch should define this as 1.
 */
#ifndef ARCH_SUPPORTS_FTRACE_OPS
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif

/*
 * If the arch's mcount caller does not support all of ftrace's
 * features, then it must call an indirect function that
 * does. Or at least does enough to prevent any unwelcome side effects.
 */
#if !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
#else
# define FTRACE_FORCE_LIST_FUNC 0
#endif


struct module;
struct ftrace_hash;

#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos);

struct ftrace_ops;

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *op, struct pt_regs *regs);

ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);

/*
 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
 * set in the flags member.
 *
 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
 *           allocated ftrace_ops which need special care
 * CONTROL - set manually by ftrace_ops user to denote the ftrace_ops
 *           could be controlled by the following calls:
 *             ftrace_function_local_enable
 *             ftrace_function_local_disable
 * SAVE_REGS - The ftrace_ops wants regs saved at each function called
 *            and passed to the callback. If this flag is set, but the
 *            architecture does not support passing regs
 *            (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
 *            ftrace_ops will fail to register, unless the next flag
 *            is set.
 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
 *            handler can handle an arch that does not save regs
 *            (the handler tests if regs == NULL), then it can set
 *            this flag instead. It will not fail registering the ftrace_ops,
 *            but the regs field will be NULL if the arch does not support
 *            passing regs to the handler.
 *            Note, if this flag is set, the SAVE_REGS flag will automatically
 *            get set upon registering the ftrace_ops, if the arch supports it.
 * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
 *            that the callback has its own recursion protection. If it does
 *            not set this, then the ftrace infrastructure will add recursion
 *            protection for the caller.
 * STUB   - The ftrace_ops is just a placeholder.
 * INITIALIZED - The ftrace_ops has already been initialized (the first time
 *            register_ftrace_function() is called, it will initialize the ops)
 * DELETED - The ops are being deleted, do not let them be registered again.
 * ADDING  - The ops is in the process of being added.
 * REMOVING - The ops is in the process of being removed.
 * MODIFYING - The ops is in the process of changing its filter functions.
 */
enum {
	FTRACE_OPS_FL_ENABLED			= 1 << 0,
	FTRACE_OPS_FL_DYNAMIC			= 1 << 1,
	FTRACE_OPS_FL_CONTROL			= 1 << 2,
	FTRACE_OPS_FL_SAVE_REGS			= 1 << 3,
	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= 1 << 4,
	FTRACE_OPS_FL_RECURSION_SAFE		= 1 << 5,
	FTRACE_OPS_FL_STUB			= 1 << 6,
	FTRACE_OPS_FL_INITIALIZED		= 1 << 7,
	FTRACE_OPS_FL_DELETED			= 1 << 8,
	FTRACE_OPS_FL_ADDING			= 1 << 9,
	FTRACE_OPS_FL_REMOVING			= 1 << 10,
	FTRACE_OPS_FL_MODIFYING			= 1 << 11,
};

#ifdef CONFIG_DYNAMIC_FTRACE
/* The hash used to know what functions callbacks trace */
struct ftrace_ops_hash {
	struct ftrace_hash		*notrace_hash;
	struct ftrace_hash		*filter_hash;
	struct mutex			regex_lock;
};
#endif

/*
 * Note, ftrace_ops can be referenced outside of RCU protection.
 * (Although, for perf, the control ops prevent that). If ftrace_ops is
 * allocated and not part of kernel core data, the unregistering of it will
 * perform a scheduling on all CPUs to make sure that there are no more users.
 * Depending on the load of the system that may take a bit of time.
 *
 * Any private data added must also take care not to be freed and if private
 * data is added to a ftrace_ops that is in core code, the user of the
 * ftrace_ops must perform a schedule_on_each_cpu() before freeing it.
 */
struct ftrace_ops {
	ftrace_func_t			func;
	struct ftrace_ops		*next;
	unsigned long			flags;
	void				*private;
	int __percpu			*disabled;
#ifdef CONFIG_DYNAMIC_FTRACE
	int				nr_trampolines;
	struct ftrace_ops_hash		local_hash;
	struct ftrace_ops_hash		*func_hash;
	struct ftrace_ops_hash		old_hash;
	unsigned long			trampoline;
#endif
};

/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
	FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
	FTRACE_TYPE_RETURN,	/* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;

/*
 * The ftrace_ops must be static and should also
 * be read_mostly.  These functions do modify read_mostly variables
 * so use them sparingly. Never free an ftrace_ops or modify the
 * next pointer after it has been registered. Even after unregistering
 * it, the next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
void clear_ftrace_function(void);
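
/*
 * Illustrative sketch (an assumption, not part of this header): one way a
 * caller might wire a callback into the function tracer.  The names
 * my_trace_func/my_ops/my_tracer_init and the "schedule" filter are made up
 * for the example; real users would put this in a .c file, typically a module.
 */
#if 0	/* example only */
static void my_trace_func(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *regs)
{
	/* called on entry of every filtered function; must be fast */
}

static struct ftrace_ops my_ops __read_mostly = {
	.func	= my_trace_func,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
};

static int __init my_tracer_init(void)
{
	/* optional: limit tracing to one function (needs CONFIG_DYNAMIC_FTRACE) */
	ftrace_set_filter(&my_ops, "schedule", sizeof("schedule") - 1, 1);
	return register_ftrace_function(&my_ops);
}
#endif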

/**
 * ftrace_function_local_enable - enable controlled ftrace_ops on current cpu
 *
 * This function enables tracing on current cpu by decreasing
 * the per cpu control variable.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
{
	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
		return;

	(*this_cpu_ptr(ops->disabled))--;
}

/**
 * ftrace_function_local_disable - disable controlled ftrace_ops on current cpu
 *
 * This function disables tracing on current cpu by increasing
 * the per cpu control variable.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
{
	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
		return;

	(*this_cpu_ptr(ops->disabled))++;
}

/**
 * ftrace_function_local_disabled - returns ftrace_ops disabled value
 *                                  on current cpu
 *
 * This function returns value of ftrace_ops::disabled on current cpu.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
{
	WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL));
	return *this_cpu_ptr(ops->disabled);
}
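
/*
 * Illustrative sketch (an assumption, not part of this header): toggling a
 * FTRACE_OPS_FL_CONTROL ops on the local CPU.  "my_control_ops" is made up;
 * the ops must have been registered with the CONTROL flag and a per-cpu
 * disabled counter, and the helpers require preemption to be disabled.
 */
#if 0	/* example only */
static void my_toggle_local_tracing(struct ftrace_ops *my_control_ops)
{
	preempt_disable();
	ftrace_function_local_disable(my_control_ops);	/* increments the counter */
	/* ... this CPU's callback is skipped while the counter is non-zero ... */
	ftrace_function_local_enable(my_control_ops);	/* decrements the counter */
	preempt_enable();
}
#endif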

extern void ftrace_stub(unsigned long a0, unsigned long a1,
			struct ftrace_ops *op, struct pt_regs *regs);

#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline int ftrace_nr_registered_ops(void)
{
	return 0;
}
static inline void clear_ftrace_function(void) { }
static inline void ftrace_kill(void) { }
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_STACK_TRACER
extern int stack_tracer_enabled;
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos);
#endif

struct ftrace_func_command {
	struct list_head	list;
	char			*name;
	int			(*func)(struct ftrace_hash *hash,
					char *func, char *cmd,
					char *params, int enable);
};

#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);

void ftrace_bug(int err, unsigned long ip);

struct seq_file;

struct ftrace_probe_ops {
	void			(*func)(unsigned long ip,
					unsigned long parent_ip,
					void **data);
	int			(*init)(struct ftrace_probe_ops *ops,
					unsigned long ip, void **data);
	void			(*free)(struct ftrace_probe_ops *ops,
					unsigned long ip, void **data);
	int			(*print)(struct seq_file *m,
					 unsigned long ip,
					 struct ftrace_probe_ops *ops,
					 void *data);
};

extern int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
			      void *data);
extern void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				void *data);
extern void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
extern void unregister_ftrace_function_probe_all(char *glob);

extern int ftrace_text_reserved(const void *start, const void *end);

extern int ftrace_nr_registered_ops(void);

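/*
 * Illustrative sketch (an assumption, not part of this header): attaching a
 * probe to every function matching a glob via the probe interface declared
 * above.  The names my_probe_func/my_probe and the "vfs_*" pattern are made
 * up for the example.
 */
#if 0	/* example only */
static void my_probe_func(unsigned long ip, unsigned long parent_ip,
			  void **data)
{
	/* runs whenever a matching function is hit */
}

static struct ftrace_probe_ops my_probe = {
	.func	= my_probe_func,
};

static char my_glob[] = "vfs_*";

static void my_probe_setup(void)
{
	register_ftrace_function_probe(my_glob, &my_probe, NULL);
}

static void my_probe_teardown(void)
{
	unregister_ftrace_function_probe(my_glob, &my_probe, NULL);
}
#endif
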
/*
 * The dyn_ftrace record's flags field is split into two parts.
 * The first part, which is '0-FTRACE_REF_MAX', is a counter of
 * the number of callbacks that have registered the function that
 * the dyn_ftrace descriptor represents.
 *
 * The second part is a mask:
 *  ENABLED - the function is being traced
 *  REGS    - the record wants the function to save regs
 *  REGS_EN - the function is set up to save regs.
 *
 * When a new ftrace_ops is registered and wants a function to save
 * pt_regs, the rec->flags REGS bit is set. When the function has been
 * set up to save regs, the REGS_EN flag is set. Once a function
 * starts saving regs it will do so until all ftrace_ops are removed
 * from tracing that function.
 */
enum {
	FTRACE_FL_ENABLED	= (1UL << 31),
	FTRACE_FL_REGS		= (1UL << 30),
	FTRACE_FL_REGS_EN	= (1UL << 29),
	FTRACE_FL_TRAMP		= (1UL << 28),
	FTRACE_FL_TRAMP_EN	= (1UL << 27),
};

#define FTRACE_REF_MAX_SHIFT	27
#define FTRACE_FL_BITS		5
#define FTRACE_FL_MASKED_BITS	((1UL << FTRACE_FL_BITS) - 1)
#define FTRACE_FL_MASK		(FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
#define FTRACE_REF_MAX		((1UL << FTRACE_REF_MAX_SHIFT) - 1)

#define ftrace_rec_count(rec)	((rec)->flags & ~FTRACE_FL_MASK)

struct dyn_ftrace {
	unsigned long		ip; /* address of mcount call-site */
	unsigned long		flags;
	struct dyn_arch_ftrace	arch;
};
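
/*
 * Illustrative sketch (an assumption, not part of this header): how the two
 * halves of dyn_ftrace::flags are read.  inspect_rec() is made up; pr_info()
 * comes from <linux/printk.h>.
 */
#if 0	/* example only */
static void inspect_rec(struct dyn_ftrace *rec)
{
	unsigned long refs = ftrace_rec_count(rec);	/* low FTRACE_REF_MAX_SHIFT bits */

	if (rec->flags & FTRACE_FL_ENABLED)
		pr_info("%ps: traced by %lu ops, regs %s saved\n",
			(void *)rec->ip, refs,
			(rec->flags & FTRACE_FL_REGS_EN) ? "are" : "are not");
}
#endif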

int ftrace_force_update(void);
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
			 int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
			int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);

int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);

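/*
 * Illustrative sketch (an assumption, not part of this header): using the
 * filter APIs declared above before registering an ops.  my_filter_one_function
 * and my_ops are made up; kallsyms_lookup_name() comes from <linux/kallsyms.h>,
 * which is already included at the top of this file.
 */
#if 0	/* example only */
static int my_filter_one_function(struct ftrace_ops *my_ops)
{
	unsigned long ip = kallsyms_lookup_name("do_fork");
	int ret;

	if (!ip)
		return -ENOENT;

	/* remove=0: add to the filter; reset=0: keep any existing entries */
	ret = ftrace_set_filter_ip(my_ops, ip, 0, 0);
	if (ret)
		return ret;

	return register_ftrace_function(my_ops);
}
#endif
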
enum {
	FTRACE_UPDATE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_START_FUNC_RET		= (1 << 3),
	FTRACE_STOP_FUNC_RET		= (1 << 4),
};

/*
 * The FTRACE_UPDATE_* enum is used to pass information back
 * from the ftrace_update_record() and ftrace_test_record()
 * functions. These are called by the code update routines
 * to find out what is to be done for a given function.
 *
 *  IGNORE           - The function is already what we want it to be
 *  MAKE_CALL        - Start tracing the function
 *  MODIFY_CALL      - Stop saving regs for the function
 *  MAKE_NOP         - Stop tracing the function
 */
enum {
	FTRACE_UPDATE_IGNORE,
	FTRACE_UPDATE_MAKE_CALL,
	FTRACE_UPDATE_MODIFY_CALL,
	FTRACE_UPDATE_MAKE_NOP,
};

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_NOTRACE	= (1 << 1),
	FTRACE_ITER_PRINTALL	= (1 << 2),
	FTRACE_ITER_DO_HASH	= (1 << 3),
	FTRACE_ITER_HASH	= (1 << 4),
	FTRACE_ITER_ENABLED	= (1 << 5),
};

void arch_ftrace_update_code(int command);

struct ftrace_rec_iter;

struct ftrace_rec_iter *ftrace_rec_iter_start(void);
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);

#define for_ftrace_rec_iter(iter)		\
	for (iter = ftrace_rec_iter_start();	\
	     iter;				\
	     iter = ftrace_rec_iter_next(iter))


int ftrace_update_record(struct dyn_ftrace *rec, int enable);
int ftrace_test_record(struct dyn_ftrace *rec, int enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);
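
/*
 * Illustrative sketch (an assumption, not part of this header): the shape of
 * an arch update loop built on the record iterator and the FTRACE_UPDATE_*
 * results.  my_arch_patch_site() and my_arch_update_all() are hypothetical,
 * not real kernel APIs.
 */
#if 0	/* example only */
static void my_arch_patch_site(unsigned long ip, unsigned long target);	/* hypothetical */

static void my_arch_update_all(int enable)
{
	struct ftrace_rec_iter *iter;

	for_ftrace_rec_iter(iter) {
		struct dyn_ftrace *rec = ftrace_rec_iter_record(iter);

		switch (ftrace_update_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
			break;
		case FTRACE_UPDATE_MAKE_CALL:
		case FTRACE_UPDATE_MODIFY_CALL:
			my_arch_patch_site(rec->ip, ftrace_get_addr_new(rec));
			break;
		case FTRACE_UPDATE_MAKE_NOP:
			my_arch_patch_site(rec->ip, 0 /* nop */);
			break;
		}
	}
}
#endif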

extern ftrace_func_t ftrace_trace_function;

int ftrace_regex_open(struct ftrace_ops *ops, int flag,
		  struct inode *inode, struct file *file);
ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
			    size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos);
int ftrace_regex_release(struct inode *inode, struct file *file);

void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_regs_caller(void);
extern void ftrace_call(void);
extern void ftrace_regs_call(void);
extern void mcount_call(void);

void ftrace_modify_all_code(int command);

#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif

#ifndef FTRACE_GRAPH_ADDR
#define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
#endif

#ifndef FTRACE_REGS_ADDR
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
#else
# define FTRACE_REGS_ADDR FTRACE_ADDR
#endif
#endif

/*
 * If an arch would like functions that are only traced
 * by the function graph tracer to jump directly to its own
 * trampoline, then they can define FTRACE_GRAPH_TRAMP_ADDR
 * to be that address to jump to.
 */
#ifndef FTRACE_GRAPH_TRAMP_ADDR
#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif

/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the mcount call site record
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
			   struct dyn_ftrace *rec, unsigned long addr);

/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the mcount call site record
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
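
/*
 * Illustrative sketch (an assumption, not part of this header): the
 * verify-then-patch pattern the comments above describe.  The helpers
 * my_arch_gen_nop()/my_arch_gen_call()/my_arch_read_insn()/my_arch_write_insn()
 * and MY_INSN_SIZE are hypothetical stand-ins for real arch primitives.
 */
#if 0	/* example only */
static int my_arch_ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned char old[MY_INSN_SIZE], expect[MY_INSN_SIZE], new[MY_INSN_SIZE];

	my_arch_gen_nop(expect);		/* what should currently be there */
	my_arch_gen_call(new, rec->ip, addr);	/* the call we want to install */

	if (my_arch_read_insn(old, rec->ip))
		return -EFAULT;			/* could not read the site */
	if (memcmp(old, expect, MY_INSN_SIZE))
		return -EINVAL;			/* site is not the expected nop */
	if (my_arch_write_insn(rec->ip, new))
		return -EPERM;			/* could not write the site */
	return 0;
}
#endif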

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/**
 * ftrace_modify_call - convert from one addr to another (no nop)
 * @rec: the mcount call site record
 * @old_addr: the address expected to be currently called to
 * @addr: the address to change to
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @old_addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			      unsigned long addr);
#else
/* Should never be called */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				     unsigned long addr)
{
	return -EINVAL;
}
#endif

/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_release_mod(struct module *mod) {}
static inline void ftrace_module_init(struct module *mod) {}
static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
{
	return -EINVAL;
}
static inline __init int unregister_ftrace_command(char *cmd_name)
{
	return -EINVAL;
}
static inline int ftrace_text_reserved(const void *start, const void *end)
{
	return 0;
}
static inline unsigned long ftrace_location(unsigned long ip)
{
	return 0;
}

/*
 * Again users of functions that have ftrace_ops may not
 * have them defined when ftrace is not enabled, but these
 * functions may still be called. Use a macro instead of inline.
 */
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)

static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
			    size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
#endif /* CONFIG_DYNAMIC_FTRACE */

/* totally disable ftrace - can not re-enable after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = 0;
#endif
}

/*
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	int saved_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;
	return saved_ftrace_enabled;
#else
	return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = enabled;
#endif
}
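
/*
 * Illustrative sketch (an assumption, not part of this header): bracketing a
 * region that must not be function traced.  my_run_untraced() is made up,
 * and the caller provides whatever serialization ftrace_enabled needs.
 */
#if 0	/* example only */
static void my_run_untraced(void (*fn)(void))
{
	int saved = __ftrace_enabled_save();

	fn();		/* runs with the function tracer turned off */

	__ftrace_enabled_restore(saved);
}
#endif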

/* All archs should have this, but we define it for consistency */
#ifndef ftrace_return_address0
# define ftrace_return_address0 __builtin_return_address(0)
#endif

/* Archs may use other ways for ADDR1 and beyond */
#ifndef ftrace_return_address
# ifdef CONFIG_FRAME_POINTER
#  define ftrace_return_address(n) __builtin_return_address(n)
# else
#  define ftrace_return_address(n) 0UL
# endif
#endif

#define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
#define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
#define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))

#ifdef CONFIG_IRQSOFF_TRACER
  extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
  extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
#else
  static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
  static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
#endif

#ifdef CONFIG_PREEMPT_TRACER
  extern void trace_preempt_on(unsigned long a0, unsigned long a1);
  extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
/*
 * Use defines instead of static inlines because some arches will make code out
 * of the CALLER_ADDR, when we really want these to be a real nop.
 */
# define trace_preempt_on(a0, a1) do { } while (0)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#else
static inline void ftrace_init(void) { }
#endif

/*
 * Structure that defines an entry function trace.
 */
struct ftrace_graph_ent {
	unsigned long func; /* Current function */
	int depth;
};

/*
 * Structure that defines a return function trace.
 */
struct ftrace_graph_ret {
	unsigned long func; /* Current function */
	unsigned long long calltime;
	unsigned long long rettime;
	/* Number of functions that overran the depth limit for current task */
	unsigned long overrun;
	int depth;
};

/* Type of the callback handlers for tracing function graph */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* for init task */
#define INIT_FTRACE_GRAPH		.ret_stack = NULL,

/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
	unsigned long ret;
	unsigned long func;
	unsigned long long calltime;
	unsigned long long subtime;
	unsigned long fp;
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

extern int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer);

/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer, but we still want it to be traced by the usual function
 * tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph		notrace

/*
 * We want to know which function is an entrypoint of a hardirq.
 * That helps us annotate hardirq entry points in the trace output.
 */
#define __irq_entry		 __attribute__((__section__(".irqentry.text")))

/* Limits of hardirq entrypoints */
extern char __irqentry_text_start[];
extern char __irqentry_text_end[];

#define FTRACE_NOTRACE_DEPTH 65536
#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32
extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
				trace_func_graph_ent_t entryfunc);
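
/*
 * Illustrative sketch (an assumption, not part of this header): installing
 * entry/return handlers for the function graph tracer.  The handler names
 * and the depth cut-off are made up; only one graph tracer can be registered
 * at a time, so register_ftrace_graph() returns an error if one is active.
 */
#if 0	/* example only */
static int my_graph_entry(struct ftrace_graph_ent *trace)
{
	/* return nonzero to trace this function, 0 to skip it */
	return trace->depth < 3;
}

static void my_graph_return(struct ftrace_graph_ret *trace)
{
	/* trace->rettime - trace->calltime is the duration, children included */
}

static int __init my_graph_init(void)
{
	return register_ftrace_graph(my_graph_return, my_graph_entry);
}
#endif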

extern bool ftrace_graph_is_dead(void);
extern void ftrace_graph_stop(void);

/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;

extern void unregister_ftrace_graph(void);

extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);

static inline int task_curr_ret_stack(struct task_struct *t)
{
	return t->curr_ret_stack;
}

static inline void pause_graph_tracing(void)
{
	atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
	atomic_dec(&current->tracing_graph_pause);
}
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */

#define __notrace_funcgraph
#define __irq_entry
#define INIT_FTRACE_GRAPH

static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }

static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			  trace_func_graph_ent_t entryfunc)
{
	return -1;
}
static inline void unregister_ftrace_graph(void) { }

static inline int task_curr_ret_stack(struct task_struct *tsk)
{
	return -1;
}

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_TRACING

/* flags for current->trace */
enum {
	TSK_TRACE_FL_TRACE_BIT	= 0,
	TSK_TRACE_FL_GRAPH_BIT	= 1,
};
enum {
	TSK_TRACE_FL_TRACE	= 1 << TSK_TRACE_FL_TRACE_BIT,
	TSK_TRACE_FL_GRAPH	= 1 << TSK_TRACE_FL_GRAPH_BIT,
};

static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_TRACE;
}

static inline void set_tsk_trace_graph(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_graph(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline int test_tsk_trace_graph(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_GRAPH;
}

enum ftrace_dump_mode;

extern enum ftrace_dump_mode ftrace_dump_on_oops;

extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;

#ifdef CONFIG_PREEMPT
#define INIT_TRACE_RECURSION		.trace_recursion = 0,
#endif

#else /* CONFIG_TRACING */
static inline void  disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */

#ifndef INIT_TRACE_RECURSION
#define INIT_TRACE_RECURSION
#endif
#ifdef CONFIG_FTRACE_SYSCALLS

unsigned long arch_syscall_addr(int nr);

#endif /* CONFIG_FTRACE_SYSCALLS */

#endif /* _LINUX_FTRACE_H */