/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>

#include <trace/events/sched.h>

#include <asm/setup.h>

#include "trace_output.h"
#include "trace_stat.h"

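/*
 * Like WARN_ON()/WARN_ON_ONCE(), but on a detected anomaly these also
 * shut function tracing down permanently via ftrace_kill(), so a bad
 * update cannot keep rewriting kernel text.
 */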
#define FTRACE_WARN_ON(cond)			\
	({					\
		int ___r = cond;		\
		if (WARN_ON(___r))		\
			ftrace_kill();		\
		___r;				\
	})

#define FTRACE_WARN_ON_ONCE(cond)		\
	({					\
		int ___r = cond;		\
		if (WARN_ON_ONCE(___r))		\
			ftrace_kill();		\
		___r;				\
	})

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
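/* Default and maximum sizes (in bits) of the filter/notrace hashes */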
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL)

#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_OPS_HASH(opsname)	\
	.func_hash		= &opsname.local_hash,			\
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#define ASSIGN_OPS_HASH(opsname, val) \
	.func_hash		= val, \
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define INIT_OPS_HASH(opsname)
#define ASSIGN_OPS_HASH(opsname, val)
#endif

static struct ftrace_ops ftrace_list_end __read_mostly = {
	.func		= ftrace_stub,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
	INIT_OPS_HASH(ftrace_list_end)
};

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* What to set function_trace_op to */
static struct ftrace_ops *set_function_trace_op;

/* List for set_ftrace_pid's pids. */
LIST_HEAD(ftrace_pids);
struct ftrace_pid {
	struct list_head list;
	struct pid *pid;
};

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;
static struct ftrace_ops control_ops;

static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
				   struct ftrace_ops *op, struct pt_regs *regs);

#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *regs);
#else
/* See comment below, where ftrace_ops_list_func is defined */
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
#endif

/*
 * Traverse the ftrace_global_list, invoking all entries.  The reason that we
 * can use rcu_dereference_raw_notrace() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism.  The rcu_dereference_raw_notrace() calls are needed to handle
 * concurrent insertions into the ftrace_global_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)			\
	op = rcu_dereference_raw_notrace(list);			\
	do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)				\
	while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&	\
	       unlikely((op) != &ftrace_list_end))
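
/*
 * Illustrative sketch only (the 'count' local is hypothetical, not part
 * of this file): walking every registered ftrace_ops with the pair of
 * macros above.
 *
 *	do_for_each_ftrace_op(op, ftrace_ops_list) {
 *		if (op->flags & FTRACE_OPS_FL_ENABLED)
 *			count++;
 *	} while_for_each_ftrace_op(op);
 */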

static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
		mutex_init(&ops->local_hash.regex_lock);
		ops->func_hash = &ops->local_hash;
		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
	}
#endif
}

/**
 * ftrace_nr_registered_ops - return number of ops registered
 *
 * Returns the number of ftrace_ops registered and tracing functions
 */
int ftrace_nr_registered_ops(void)
{
	struct ftrace_ops *ops;
	int cnt = 0;

	mutex_lock(&ftrace_lock);

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next)
		cnt++;

	mutex_unlock(&ftrace_lock);

	return cnt;
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct pt_regs *regs)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip, op, regs);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be a brief lag before every CPU stops
 * calling the old function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}

static void control_ops_disable_all(struct ftrace_ops *ops)
{
	int cpu;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(ops->disabled, cpu) = 1;
}

static int control_ops_alloc(struct ftrace_ops *ops)
{
	int __percpu *disabled;

	disabled = alloc_percpu(int);
	if (!disabled)
		return -ENOMEM;

	ops->disabled = disabled;
	control_ops_disable_all(ops);
	return 0;
}

static void ftrace_sync(struct work_struct *work)
{
	/*
	 * This function is just a stub to implement a hard force
	 * of synchronize_sched(). This requires synchronizing
	 * tasks even in userspace and idle.
	 *
	 * Yes, function tracing is rude.
	 */
}

static void ftrace_sync_ipi(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static void update_function_graph_func(void);
#else
static inline void update_function_graph_func(void) { }
#endif

static void update_ftrace_function(void)
{
	ftrace_func_t func;

	/*
	 * Prepare the ftrace_ops that the arch callback will use.
	 * If there's only one ftrace_ops registered, the ftrace_ops_list
	 * will point to the ops we want.
	 */
	set_function_trace_op = ftrace_ops_list;

	/* If there's no ftrace_ops registered, just call the stub function */
	if (ftrace_ops_list == &ftrace_list_end) {
		func = ftrace_stub;

	/*
	 * If we are at the end of the list and this ops is
	 * recursion safe and not dynamic and the arch supports passing ops,
	 * then have the mcount trampoline call the function directly.
	 */
	} else if (ftrace_ops_list->next == &ftrace_list_end) {
		func = ftrace_ops_get_func(ftrace_ops_list);

	} else {
		/* Just use the default ftrace_ops */
		set_function_trace_op = &ftrace_list_end;
		func = ftrace_ops_list_func;
	}

	update_function_graph_func();

	/* If there's no change, then do nothing more here */
	if (ftrace_trace_function == func)
		return;

	/*
	 * If we are using the list function, it doesn't care
	 * about the function_trace_ops.
	 */
	if (func == ftrace_ops_list_func) {
		ftrace_trace_function = func;
		/*
		 * Don't even bother setting function_trace_ops,
		 * it would be racy to do so anyway.
		 */
		return;
	}

#ifndef CONFIG_DYNAMIC_FTRACE
	/*
	 * For static tracing, we need to be a bit more careful.
	 * The function change takes effect immediately. Thus,
	 * we need to coordinate the setting of the function_trace_ops
	 * with the setting of the ftrace_trace_function.
	 *
	 * Set the function to the list ops, which will call the
	 * function we want, albeit indirectly, but it handles the
	 * ftrace_ops and doesn't depend on function_trace_op.
	 */
	ftrace_trace_function = ftrace_ops_list_func;
	/*
	 * Make sure all CPUs see this. Yes this is slow, but static
	 * tracing is slow and nasty to have enabled.
	 */
	schedule_on_each_cpu(ftrace_sync);
	/* Now all cpus are using the list ops. */
	function_trace_op = set_function_trace_op;
	/* Make sure the function_trace_op is visible on all CPUs */
	smp_wmb();
	/* Nasty way to force a rmb on all cpus */
	smp_call_function(ftrace_sync_ipi, NULL, 1);
	/* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */

	ftrace_trace_function = func;
}

int using_ftrace_ops_list_func(void)
{
	return ftrace_trace_function == ftrace_ops_list_func;
}

static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	ops->next = *list;
	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}

static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (*list == ops && ops->next == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}

static void add_ftrace_list_ops(struct ftrace_ops **list,
				struct ftrace_ops *main_ops,
				struct ftrace_ops *ops)
{
	int first = *list == &ftrace_list_end;
	add_ftrace_ops(list, ops);
	if (first)
		add_ftrace_ops(&ftrace_ops_list, main_ops);
}

static int remove_ftrace_list_ops(struct ftrace_ops **list,
				  struct ftrace_ops *main_ops,
				  struct ftrace_ops *ops)
{
	int ret = remove_ftrace_ops(list, ops);
	if (!ret && *list == &ftrace_list_end)
		ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
	return ret;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ops->flags & FTRACE_OPS_FL_DELETED)
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
	 */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif

	if (!core_kernel_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	if (ops->flags & FTRACE_OPS_FL_CONTROL) {
		if (control_ops_alloc(ops))
			return -ENOMEM;
		add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
		/* The control_ops needs the trampoline update */
		ops = &control_ops;
	} else
		add_ftrace_ops(&ftrace_ops_list, ops);

	ftrace_update_trampoline(ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	if (ops->flags & FTRACE_OPS_FL_CONTROL) {
		ret = remove_ftrace_list_ops(&ftrace_control_list,
					     &control_ops, ops);
	} else
		ret = remove_ftrace_ops(&ftrace_ops_list, ops);

	if (ret < 0)
		return ret;

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}

static void ftrace_update_pid_func(void)
{
	/* Only do something if we are tracing something */
	if (ftrace_trace_function == ftrace_stub)
		return;

	update_ftrace_function();
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
	unsigned long long		time_squared;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
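
/*
 * Rough illustration, assuming a 64-bit build with 4K pages and the
 * function graph fields compiled in: a struct ftrace_profile is about
 * 48 bytes, so a page holds on the order of 85 records.  The exact
 * numbers depend on the architecture and configuration.
 */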

static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)

static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}
#else
/* without function graph, compare against hit counts */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "  Function                               "
		 "Hit    Time            Avg             s^2\n"
		    "  --------                               "
		 "---    ----            ---             ---\n");
#else
	seq_puts(m, "  Function                               Hit\n"
		    "  --------                               ---\n");
#endif
	return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
#endif
	mutex_lock(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0)) {
		ret = -EBUSY;
		goto out;
	}

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "    ");
	avg = rec->time;
	do_div(avg, rec->counter);

	/* Sample variance (s^2) */
	if (rec->counter <= 1)
		stddev = 0;
	else {
		/*
		 * Apply Welford's method:
		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
		 */
		stddev = rec->counter * rec->time_squared -
			 rec->time * rec->time;

		/*
		 * Divide only 1000 for ns^2 -> us^2 conversion.
		 * trace_print_graph_duration will divide 1000 again.
		 */
		do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
	}

	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(stddev, &s);
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');
out:
	mutex_unlock(&ftrace_profile_lock);

	return ret;
}

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist because
	 * dynamic tracing is what counts them. From past experience
	 * we have around 20K functions. That should be more than enough.
	 * It is highly unlikely we will execute every function in
	 * the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

	for (i = 1; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

 out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}

static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	unsigned long key;

	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated, this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}

static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
		      struct ftrace_ops *ops, struct pt_regs *regs)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	function_profile_call(trace->func, 0, NULL, NULL);
	return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	/* If the calltime was zero'd ignore it */
	if (!trace->calltime)
		goto out;

	calltime = trace->rettime - trace->calltime;

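	/*
	 * Without the graph-time option, only the time spent in the
	 * function itself is charged to it: fold this call's time into
	 * the parent's subtime and subtract the time consumed by our
	 * own children.
	 */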
	if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
		int index;

		index = trace->depth;

		/* Append this call time to the parent time to subtract */
		if (index)
			current->ret_stack[index - 1].subtime += calltime;

		if (current->ret_stack[index].subtime < calltime)
			calltime -= current->ret_stack[index].subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		rec->time_squared += calltime * calltime;
	}

 out:
	local_irq_restore(flags);
}

static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&profile_graph_return,
				     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph();
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
	.func		= function_profile_call,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
	INIT_OPS_HASH(ftrace_profile_ops)
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

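/*
 * Handler for writes to the "function_profile_enabled" file created in
 * ftrace_profile_debugfs() below (usually found under the tracing
 * debugfs directory, e.g. /sys/kernel/debug/tracing/ on a typical
 * setup; the mount point is not guaranteed).
 */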
static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_sched().
			 */
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	*ppos += cnt;

	return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
	.llseek		= default_llseek,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};

static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	struct dentry *entry;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		/* allocate enough for function name + cpu number */
		name = kmalloc(32, GFP_KERNEL);
		if (!name) {
			/*
			 * The files created are permanent, if something happens
			 * we still do not free memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		snprintf(name, 32, "function%d", cpu);
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	entry = debugfs_create_file("function_profile_enabled", 0644,
				    d_tracer, NULL, &ftrace_profile_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'function_profile_enabled' entry\n");
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

static struct pid * const ftrace_swapper_pid = &init_struct_pid;

#ifdef CONFIG_DYNAMIC_FTRACE

static struct ftrace_ops *removed_ops;

/*
 * Set when doing a global update, like enabling all recs or disabling them.
 * It is not set when just updating a single ftrace_ops.
 */
static bool update_all_ops;

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
	struct hlist_node	node;
	struct ftrace_probe_ops	*ops;
	unsigned long		flags;
	unsigned long		ip;
	void			*data;
	struct list_head	free_list;
};

struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;
};

struct ftrace_hash {
	unsigned long		size_bits;
	struct hlist_head	*buckets;
	unsigned long		count;
	struct rcu_head		rcu;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
	.buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)

static struct ftrace_ops global_ops = {
	.func				= ftrace_stub,
	.local_hash.notrace_hash	= EMPTY_HASH,
	.local_hash.filter_hash		= EMPTY_HASH,
	INIT_OPS_HASH(global_ops)
	.flags				= FTRACE_OPS_FL_RECURSION_SAFE |
					  FTRACE_OPS_FL_INITIALIZED,
};

/*
 * This is used by __kernel_text_address() to return true if the
 * address is on a dynamically allocated trampoline that would
 * not return true for either core_kernel_text() or
 * is_module_text_address().
 */
bool is_ftrace_trampoline(unsigned long addr)
{
	struct ftrace_ops *op;
	bool ret = false;

	/*
	 * Some of the ops may be dynamically allocated,
	 * they are freed after a synchronize_sched().
	 */
	preempt_disable_notrace();

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/*
		 * This is to check for dynamically allocated trampolines.
		 * Trampolines that are in kernel text will have
		 * core_kernel_text() return true.
		 */
		if (op->trampoline && op->trampoline_size)
			if (addr >= op->trampoline &&
			    addr < op->trampoline + op->trampoline_size) {
				ret = true;
				goto out;
			}
	} while_for_each_ftrace_op(op);

 out:
	preempt_enable_notrace();

	return ret;
}

struct ftrace_page {
	struct ftrace_page	*next;
	struct dyn_ftrace	*records;
	int			index;
	int			size;
};

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
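/*
 * Illustration: with 4K pages and a 16-byte dyn_ftrace (ip + flags on a
 * 64-bit build with an empty dyn_arch_ftrace), that is 256 entries per
 * page; the real value depends on the architecture.
 */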

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static bool __always_inline ftrace_hash_empty(struct ftrace_hash *hash)
{
	return !hash || !hash->count;
}

static struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	unsigned long key;
	struct ftrace_func_entry *entry;
	struct hlist_head *hhd;

	if (ftrace_hash_empty(hash))
		return NULL;

	if (hash->size_bits > 0)
		key = hash_long(ip, hash->size_bits);
	else
		key = 0;

	hhd = &hash->buckets[key];

	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
		if (entry->ip == ip)
			return entry;
	}
	return NULL;
}

static void __add_hash_entry(struct ftrace_hash *hash,
			     struct ftrace_func_entry *entry)
{
	struct hlist_head *hhd;
	unsigned long key;

	if (hash->size_bits)
		key = hash_long(entry->ip, hash->size_bits);
	else
		key = 0;

	hhd = &hash->buckets[key];
	hlist_add_head(&entry->hlist, hhd);
	hash->count++;
}

static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
	struct ftrace_func_entry *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->ip = ip;
	__add_hash_entry(hash, entry);

	return 0;
}

static void
free_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	kfree(entry);
	hash->count--;
}

static void
remove_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	hash->count--;
}

static void ftrace_hash_clear(struct ftrace_hash *hash)
{
	struct hlist_head *hhd;
	struct hlist_node *tn;
	struct ftrace_func_entry *entry;
	int size = 1 << hash->size_bits;
	int i;

	if (!hash->count)
		return;

	for (i = 0; i < size; i++) {
		hhd = &hash->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
			free_hash_entry(hash, entry);
	}
	FTRACE_WARN_ON(hash->count);
}

static void free_ftrace_hash(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	ftrace_hash_clear(hash);
	kfree(hash->buckets);
	kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
	struct ftrace_hash *hash;

	hash = container_of(rcu, struct ftrace_hash, rcu);
	free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
}

void ftrace_free_filter(struct ftrace_ops *ops)
{
	ftrace_ops_init(ops);
	free_ftrace_hash(ops->func_hash->filter_hash);
	free_ftrace_hash(ops->func_hash->notrace_hash);
}

static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
	struct ftrace_hash *hash;
	int size;

	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
	if (!hash)
		return NULL;

	size = 1 << size_bits;
	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);

	if (!hash->buckets) {
		kfree(hash);
		return NULL;
	}

	hash->size_bits = size_bits;

	return hash;
}

static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	int size;
	int ret;
	int i;

	new_hash = alloc_ftrace_hash(size_bits);
	if (!new_hash)
		return NULL;

	/* Empty hash? */
	if (ftrace_hash_empty(hash))
		return new_hash;

	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
			ret = add_hash_entry(new_hash, entry->ip);
			if (ret < 0)
				goto free_hash;
		}
	}

	FTRACE_WARN_ON(new_hash->count != hash->count);

	return new_hash;

 free_hash:
	free_ftrace_hash(new_hash);
	return NULL;
}

static void
ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
static void
ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);

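/*
 * Move every entry of @src into a freshly sized hash and publish it at
 * *@dst under RCU, updating the dyn_ftrace record accounting for @ops
 * around the switch.  @src is left empty on success.
 */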
static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
		 struct ftrace_hash **dst, struct ftrace_hash *src)
{
	struct ftrace_func_entry *entry;
	struct hlist_node *tn;
	struct hlist_head *hhd;
	struct ftrace_hash *new_hash;
	int size = src->count;
	int bits = 0;
	int i;

	/*
	 * If the new source is empty, just free dst and assign it
	 * the empty_hash.
	 */
	if (!src->count) {
		new_hash = EMPTY_HASH;
		goto update;
	}

	/*
	 * Make the hash size about 1/2 the # found
	 */
	for (size /= 2; size; size >>= 1)
		bits++;

	/* Don't allocate too much */
	if (bits > FTRACE_HASH_MAX_BITS)
		bits = FTRACE_HASH_MAX_BITS;

	new_hash = alloc_ftrace_hash(bits);
	if (!new_hash)
		return -ENOMEM;

	size = 1 << src->size_bits;
	for (i = 0; i < size; i++) {
		hhd = &src->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
			remove_hash_entry(src, entry);
			__add_hash_entry(new_hash, entry);
		}
	}

update:
	/*
	 * Remove the current set, update the hash and add
	 * them back.
	 */
	ftrace_hash_rec_disable_modify(ops, enable);

	rcu_assign_pointer(*dst, new_hash);

	ftrace_hash_rec_enable_modify(ops, enable);

	return 0;
}

static bool hash_contains_ip(unsigned long ip,
			     struct ftrace_ops_hash *hash)
{
	/*
	 * The function record is a match if it exists in the filter
	 * hash and not in the notrace hash. Note, an empty hash is
	 * considered a match for the filter hash, but an empty
	 * notrace hash is considered not in the notrace hash.
	 */
	return (ftrace_hash_empty(hash->filter_hash) ||
		ftrace_lookup_ip(hash->filter_hash, ip)) &&
		(ftrace_hash_empty(hash->notrace_hash) ||
		 !ftrace_lookup_ip(hash->notrace_hash, ip));
}

/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu_sched().
 */
static int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
	struct ftrace_ops_hash hash;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * There's a small race when adding ops that the ftrace handler
	 * that wants regs, may be called without them. We can not
	 * allow that handler to be called if regs is NULL.
	 */
	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
		return 0;
#endif

	hash.filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
	hash.notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);

	if (hash_contains_ip(ip, &hash))
		ret = 1;
	else
		ret = 0;

	return ret;
}

/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}
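
/*
 * Illustrative sketch only ('count' is a hypothetical local): visiting
 * every dyn_ftrace record currently known to ftrace.
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		count++;
 *	} while_for_each_ftrace_rec();
 */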


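/*
 * bsearch() comparator used by ftrace_location_range(): @a is a search
 * key whose ->ip holds the start of the range and whose ->flags is
 * (ab)used to hold the end; @b is a real dyn_ftrace record.  Returns 0
 * when the [start, end] range overlaps the recorded mcount call site.
 */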
static int ftrace_cmp_recs(const void *a, const void *b)
{
	const struct dyn_ftrace *key = a;
	const struct dyn_ftrace *rec = b;

	if (key->flags < rec->ip)
		return -1;
	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
		return 1;
	return 0;
}

static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	struct dyn_ftrace key;

	key.ip = start;
	key.flags = end;	/* overload flags, as it is unsigned long */

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		if (end < pg->records[0].ip ||
		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
			continue;
		rec = bsearch(&key, pg->records, pg->index,
			      sizeof(struct dyn_ftrace),
			      ftrace_cmp_recs);
		if (rec)
			return rec->ip;
	}

	return 0;
}

/**
 * ftrace_location - return true if the ip giving is a traced location
 * @ip: the instruction pointer to check
 *
 * Returns rec->ip if @ip given is a pointer to a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
unsigned long ftrace_location(unsigned long ip)
{
	return ftrace_location_range(ip, ip);
}

/**
 * ftrace_text_reserved - return true if range contains an ftrace location
 * @start: start of range to search
 * @end: end of range to search (inclusive). @end points to the last byte to check.
 *
 * Returns 1 if @start and @end contains a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_text_reserved(const void *start, const void *end)
{
	unsigned long ret;

	ret = ftrace_location_range((unsigned long)start,
				    (unsigned long)end);

	return (int)!!ret;
}

/* Test if ops registered to this rec needs regs */
static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	bool keep_regs = false;

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next) {
		/* pass rec in as regs to have non-NULL val */
		if (ftrace_ops_test(ops, rec->ip, rec)) {
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				keep_regs = true;
				break;
			}
		}
	}

	return  keep_regs;
}

static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
				     int filter_hash,
				     bool inc)
{
	struct ftrace_hash *hash;
	struct ftrace_hash *other_hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int count = 0;
	int all = 0;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return;

	/*
	 * In the filter_hash case:
	 *   If the count is zero, we update all records.
	 *   Otherwise we just update the items in the hash.
	 *
	 * In the notrace_hash case:
	 *   We enable the update in the hash.
	 *   As disabling notrace means enabling the tracing,
	 *   and enabling notrace means disabling, the inc variable
	 *   gets inversed.
	 */
	if (filter_hash) {
		hash = ops->func_hash->filter_hash;
		other_hash = ops->func_hash->notrace_hash;
		if (ftrace_hash_empty(hash))
			all = 1;
	} else {
		inc = !inc;
		hash = ops->func_hash->notrace_hash;
		other_hash = ops->func_hash->filter_hash;
		/*
		 * If the notrace hash has no items,
		 * then there's nothing to do.
		 */
		if (ftrace_hash_empty(hash))
			return;
	}

	do_for_each_ftrace_rec(pg, rec) {
		int in_other_hash = 0;
		int in_hash = 0;
		int match = 0;

		if (all) {
			/*
			 * Only the filter_hash affects all records.
			 * Update if the record is not in the notrace hash.
			 */
			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
				match = 1;
		} else {
			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);

			/*
			 * If filter_hash is set, we want to match all functions
			 * that are in the hash but not in the other hash.
			 *
			 * If filter_hash is not set, then we are decrementing.
			 * That means we match anything that is in the hash
			 * and also in the other_hash. That is, we need to turn
			 * off functions in the other hash because they are disabled
			 * by this hash.
			 */
			if (filter_hash && in_hash && !in_other_hash)
				match = 1;
			else if (!filter_hash && in_hash &&
				 (in_other_hash || ftrace_hash_empty(other_hash)))
				match = 1;
		}
		if (!match)
			continue;

		if (inc) {
			rec->flags++;
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
				return;

			/*
			 * If there's only a single callback registered to a
			 * function, and the ops has a trampoline registered
			 * for it, then we can call it directly.
			 */
			if (ftrace_rec_count(rec) == 1 && ops->trampoline)
				rec->flags |= FTRACE_FL_TRAMP;
			else
				/*
				 * If we are adding another function callback
				 * to this function, and the previous had a
				 * custom trampoline in use, then we need to go
				 * back to the default trampoline.
				 */
				rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * If any ops wants regs saved for this function
			 * then all ops will get saved regs.
			 */
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
				rec->flags |= FTRACE_FL_REGS;
		} else {
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
				return;
			rec->flags--;

			/*
			 * If the rec had REGS enabled and the ops that is
			 * being removed had REGS set, then see if there is
			 * still any ops for this record that wants regs.
			 * If not, we can stop recording them.
			 */
			if (ftrace_rec_count(rec) > 0 &&
			    rec->flags & FTRACE_FL_REGS &&
			    ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				if (!test_rec_ops_needs_regs(rec))
					rec->flags &= ~FTRACE_FL_REGS;
			}

			/*
			 * If the rec had TRAMP enabled, then it needs to
			 * be cleared. As TRAMP can only be enabled iff
			 * there is only a single ops attached to it.
			 * In other words, always disable it on decrementing.
			 * In the future, we may set it if rec count is
			 * decremented to one, and the ops that is left
			 * has a trampoline.
			 */
			rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * flags will be cleared in ftrace_check_record()
			 * if rec count is zero.
			 */
		}
		count++;
		/* Shortcut, if we handled all records, we are done. */
		if (!all && count == hash->count)
			return;
	} while_for_each_ftrace_rec();
}

static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
				    int filter_hash)
{
	__ftrace_hash_rec_update(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
				   int filter_hash)
{
	__ftrace_hash_rec_update(ops, filter_hash, 1);
}

static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
					  int filter_hash, int inc)
{
	struct ftrace_ops *op;

	__ftrace_hash_rec_update(ops, filter_hash, inc);

	if (ops->func_hash != &global_ops.local_hash)
		return;

	/*
	 * If the ops shares the global_ops hash, then we need to update
	 * all ops that are enabled and use this hash.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* Already done */
		if (op == ops)
			continue;
		if (op->func_hash == &global_ops.local_hash)
			__ftrace_hash_rec_update(op, filter_hash, inc);
	} while_for_each_ftrace_op(op);
}

static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
					   int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
					  int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 1);
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);

/**
 * ftrace_bug - report and shutdown function tracer
 * @failed: The failed type (EFAULT, EINVAL, EPERM)
 * @rec: The record that failed
 *
 * The arch code that enables or disables the function tracing
 * can call ftrace_bug() when it has detected a problem in
 * modifying the code. @failed should be one of either:
 * EFAULT - if the problem happens on reading the @ip address
 * EINVAL - if what is read at @ip is not what was expected
 * EPERM - if the problem happens on writing to the @ip address
 */
void ftrace_bug(int failed, struct dyn_ftrace *rec)
{
	unsigned long ip = rec ? rec->ip : 0;

	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		pr_cont("\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
	if (rec) {
		struct ftrace_ops *ops = NULL;

		pr_info("ftrace record flags: %lx\n", rec->flags);
		pr_cont(" (%ld)%s", ftrace_rec_count(rec),
			rec->flags & FTRACE_FL_REGS ? " R" : "  ");
		if (rec->flags & FTRACE_FL_TRAMP_EN) {
			ops = ftrace_find_tramp_ops_any(rec);
			if (ops)
				pr_cont("\ttramp: %pS",
					(void *)ops->trampoline);
			else
				pr_cont("\ttramp: ERROR!");

		}
		ip = ftrace_get_addr_curr(rec);
		pr_cont(" expected tramp: %lx\n", ip);
	}
}

static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
{
	unsigned long flag = 0UL;

	/*
	 * If we are updating calls:
	 *
	 *   If the record has a ref count, then we need to enable it
	 *   because someone is using it.
	 *
	 *   Otherwise we make sure its disabled.
	 *
	 * If we are disabling calls, then disable all records that
	 * are enabled.
	 */
	if (enable && ftrace_rec_count(rec))
		flag = FTRACE_FL_ENABLED;

	/*
	 * If enabling and the REGS flag does not match the REGS_EN, or
	 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
	 * this record. Set flags to fail the compare against ENABLED.
	 */
	if (flag) {
		if (!(rec->flags & FTRACE_FL_REGS) != 
		    !(rec->flags & FTRACE_FL_REGS_EN))
			flag |= FTRACE_FL_REGS;

		if (!(rec->flags & FTRACE_FL_TRAMP) != 
		    !(rec->flags & FTRACE_FL_TRAMP_EN))
			flag |= FTRACE_FL_TRAMP;
	}

	/* If the state of this record hasn't changed, then do nothing */
	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
		return FTRACE_UPDATE_IGNORE;

	if (flag) {
		/* Save off if rec is being enabled (for return value) */
		flag ^= rec->flags & FTRACE_FL_ENABLED;

		if (update) {
			rec->flags |= FTRACE_FL_ENABLED;
			if (flag & FTRACE_FL_REGS) {
				if (rec->flags & FTRACE_FL_REGS)
					rec->flags |= FTRACE_FL_REGS_EN;
				else
					rec->flags &= ~FTRACE_FL_REGS_EN;
			}
			if (flag & FTRACE_FL_TRAMP) {
				if (rec->flags & FTRACE_FL_TRAMP)
					rec->flags |= FTRACE_FL_TRAMP_EN;
				else
					rec->flags &= ~FTRACE_FL_TRAMP_EN;
			}
		}

		/*
		 * If this record is being updated from a nop, then
		 *   return UPDATE_MAKE_CALL.
		 * Otherwise,
		 *   return UPDATE_MODIFY_CALL to tell the caller to convert
		 *   from the save regs, to a non-save regs function or
		 *   vice versa, or from a trampoline call.
		 */
		if (flag & FTRACE_FL_ENABLED)
			return FTRACE_UPDATE_MAKE_CALL;

		return FTRACE_UPDATE_MODIFY_CALL;
	}

	if (update) {
		/* If there's no more users, clear all flags */
		if (!ftrace_rec_count(rec))
			rec->flags = 0;
		else
			/* Just disable the record (keep REGS state) */
			rec->flags &= ~FTRACE_FL_ENABLED;
	}

	return FTRACE_UPDATE_MAKE_NOP;
}

/**
 * ftrace_update_record, set a record that now is tracing or not
 * @rec: the record to update
 * @enable: set to 1 if the record is tracing, zero to force disable
 *
 * The records that represent all functions that can be traced need
 * to be updated when tracing has been enabled.
 */
int ftrace_update_record(struct dyn_ftrace *rec, int enable)
{
	return ftrace_check_record(rec, enable, 1);
}

/**
 * ftrace_test_record, check if the record has been enabled or not
 * @rec: the record to test
 * @enable: set to 1 to check if enabled, 0 if it is disabled
 *
 * The arch code may need to test if a record is already set to
 * tracing to determine how to modify the function code that it
 * represents.
 */
int ftrace_test_record(struct dyn_ftrace *rec, int enable)
{
	return ftrace_check_record(rec, enable, 0);
}

static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
1956
	unsigned long ip = rec->ip;

	do_for_each_ftrace_op(op, ftrace_ops_list) {

		if (!op->trampoline)
			continue;

1963
		if (hash_contains_ip(ip, op->func_hash))
			return op;
	} while_for_each_ftrace_op(op);

	return NULL;
}

static struct ftrace_ops *
ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
1974
	unsigned long ip = rec->ip;
1975

	/*
	 * Need to check removed ops first.
	 * If they are being removed, and this rec has a tramp,
	 * and this rec is in the ops list, then it would be the
	 * one with the tramp.
	 */
	if (removed_ops) {
		if (hash_contains_ip(ip, &removed_ops->old_hash))
1984 1985 1986
			return removed_ops;
	}

	/*
	 * Need to find the current trampoline for a rec.
	 * Now, a trampoline is only attached to a rec if there
	 * was a single 'ops' attached to it. But this can be called
	 * when we are adding another op to the rec or removing the
	 * current one. Thus, if the op is being added, we can
	 * ignore it because it hasn't attached itself to the rec
	 * yet.
	 *
	 * If an ops is being modified (hooking to different functions)
	 * then we don't care about the new functions that are being
	 * added, just the old ones (that are probably being removed).
	 *
	 * If we are adding an ops to a function that already is using
	 * a trampoline, it needs to be removed (trampolines are only
	 * for single ops connected), then an ops that is not being
	 * modified also needs to be checked.
2004
	 */
2005
	do_for_each_ftrace_op(op, ftrace_ops_list) {

		if (!op->trampoline)
			continue;

		/*
		 * If the ops is being added, it hasn't gotten to
		 * the point to be removed from this tree yet.
		 */
		if (op->flags & FTRACE_OPS_FL_ADDING)
2015 2016
			continue;

2017

2018
		/*
2019 2020 2021
		 * If the ops is being modified and is in the old
		 * hash, then it is probably being removed from this
		 * function.
2022 2023 2024
		 */
		if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
		    hash_contains_ip(ip, &op->old_hash))
2025
			return op;
		/*
		 * If the ops is not being added or modified, and it's
		 * in its normal filter hash, then this must be the one
		 * we want!
		 */
		if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
		    hash_contains_ip(ip, op->func_hash))
			return op;

	} while_for_each_ftrace_op(op);

	return NULL;
}

static struct ftrace_ops *
ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
2044
	unsigned long ip = rec->ip;
2045 2046 2047

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* pass rec in as regs to have non-NULL val */
2048
		if (hash_contains_ip(ip, op->func_hash))
			return op;
	} while_for_each_ftrace_op(op);

	return NULL;
}

/**
 * ftrace_get_addr_new - Get the call address to set to
 * @rec:  The ftrace record descriptor
 *
 * If the record has the FTRACE_FL_REGS set, that means that it
 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
 * is not set, then it wants to convert to the normal callback.
 *
 * Returns the address of the trampoline to set to
 */
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;

	/* Trampolines take precedence over regs */
	if (rec->flags & FTRACE_FL_TRAMP) {
		ops = ftrace_find_tramp_ops_new(rec);
		if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
2073 2074
			pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
				(void *)rec->ip, (void *)rec->ip, rec->flags);
			/* Ftrace is shutting down, return anything */
			return (unsigned long)FTRACE_ADDR;
		}
		return ops->trampoline;
	}

	if (rec->flags & FTRACE_FL_REGS)
		return (unsigned long)FTRACE_REGS_ADDR;
	else
		return (unsigned long)FTRACE_ADDR;
}

/**
 * ftrace_get_addr_curr - Get the call address that is already there
 * @rec:  The ftrace record descriptor
 *
 * The FTRACE_FL_REGS_EN is set when the record already points to
 * a function that saves all the regs. Basically the '_EN' version
 * represents the current state of the function.
 *
 * Returns the address of the trampoline that is currently being called
 */
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;

	/* Trampolines take precedence over regs */
	if (rec->flags & FTRACE_FL_TRAMP_EN) {
		ops = ftrace_find_tramp_ops_curr(rec);
		if (FTRACE_WARN_ON(!ops)) {
			pr_warning("Bad trampoline accounting at: %p (%pS)\n",
				    (void *)rec->ip, (void *)rec->ip);
			/* Ftrace is shutting down, return anything */
			return (unsigned long)FTRACE_ADDR;
		}
		return ops->trampoline;
	}

	if (rec->flags & FTRACE_FL_REGS_EN)
		return (unsigned long)FTRACE_REGS_ADDR;
	else
		return (unsigned long)FTRACE_ADDR;
}

2119 2120 2121
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
2122
	unsigned long ftrace_old_addr;
2123 2124 2125
	unsigned long ftrace_addr;
	int ret;

2126
	ftrace_addr = ftrace_get_addr_new(rec);
2127

	/* This needs to be done before we call ftrace_update_record */
	ftrace_old_addr = ftrace_get_addr_curr(rec);

	ret = ftrace_update_record(rec, enable);
2132

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MAKE_CALL:
2138
		return ftrace_make_call(rec, ftrace_addr);
2139 2140

	case FTRACE_UPDATE_MAKE_NOP:
2141
		return ftrace_make_nop(NULL, rec, ftrace_old_addr);
2142 2143 2144

	case FTRACE_UPDATE_MODIFY_CALL:
		return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
2145 2146
	}

	return -1; /* unknown ftrace bug */
}

2150
void __weak ftrace_replace_code(int enable)
2151 2152 2153
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int failed;
2155

2156 2157 2158
	if (unlikely(ftrace_disabled))
		return;

2159
	do_for_each_ftrace_rec(pg, rec) {
2160
		failed = __ftrace_replace_code(rec, enable);
2161
		if (failed) {
2162
			ftrace_bug(failed, rec);
2163 2164
			/* Stop processing */
			return;
2165
		}
2166
	} while_for_each_ftrace_rec();
2167 2168
}

struct ftrace_rec_iter {
	struct ftrace_page	*pg;
	int			index;
};

/**
 * ftrace_rec_iter_start, start up iterating over traced functions
 *
 * Returns an iterator handle that is used to iterate over all
 * the records that represent address locations where functions
 * are traced.
 *
 * May return NULL if no records are available.
 */
struct ftrace_rec_iter *ftrace_rec_iter_start(void)
{
	/*
	 * We only use a single iterator.
	 * Protected by the ftrace_lock mutex.
	 */
	static struct ftrace_rec_iter ftrace_rec_iter;
	struct ftrace_rec_iter *iter = &ftrace_rec_iter;

	iter->pg = ftrace_pages_start;
	iter->index = 0;

	/* Could have empty pages */
	while (iter->pg && !iter->pg->index)
		iter->pg = iter->pg->next;

	if (!iter->pg)
		return NULL;

	return iter;
}

/**
 * ftrace_rec_iter_next, get the next record to process.
 * @iter: The handle to the iterator.
 *
 * Returns the next iterator after the given iterator @iter.
 */
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
{
	iter->index++;

	if (iter->index >= iter->pg->index) {
		iter->pg = iter->pg->next;
		iter->index = 0;

		/* Could have empty pages */
		while (iter->pg && !iter->pg->index)
			iter->pg = iter->pg->next;
	}

	if (!iter->pg)
		return NULL;

	return iter;
}

/**
 * ftrace_rec_iter_record, get the record at the iterator location
 * @iter: The current iterator location
 *
 * Returns the record that the current @iter is at.
 */
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
{
	return &iter->pg->records[iter->index];
}
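
/*
 * Illustrative sketch (not taken from any particular architecture): the
 * iterator above exists so that arch code doing batched patching can walk
 * every record and combine it with ftrace_test_record(), roughly like
 * this; the helper name arch_patch_site() is hypothetical:
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		if (ftrace_test_record(rec, 1) == FTRACE_UPDATE_IGNORE)
 *			continue;
 *		arch_patch_site(rec->ip);
 *	}
 */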

static int
2242
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
2243
{
2244
	int ret;
2245

2246 2247 2248
	if (unlikely(ftrace_disabled))
		return 0;

2249
	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
2250
	if (ret) {
2251
		ftrace_bug(ret, rec);
2252
		return 0;
2253
	}
2254
	return 1;
2255 2256
}

/*
 * archs can override this function if they must do something
 * before the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
	return 0;
}

/*
 * archs can override this function if they must do something
 * after the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
	return 0;
}

2275
void ftrace_modify_all_code(int command)
2276
{
2277
	int update = command & FTRACE_UPDATE_TRACE_FUNC;
2278
	int err = 0;

	/*
	 * If the ftrace_caller calls a ftrace_ops func directly,
	 * we need to make sure that it only traces functions it
	 * expects to trace. When doing the switch of functions,
	 * we need to update to the ftrace_ops_list_func first
	 * before the transition between old and new calls are set,
	 * as the ftrace_ops_list_func will check the ops hashes
	 * to make sure the ops are having the right functions
	 * traced.
	 */
2290 2291 2292 2293 2294
	if (update) {
		err = ftrace_update_ftrace_func(ftrace_ops_list_func);
		if (FTRACE_WARN_ON(err))
			return;
	}
2295

2296
	if (command & FTRACE_UPDATE_CALLS)
2297
		ftrace_replace_code(1);
2298
	else if (command & FTRACE_DISABLE_CALLS)
2299 2300
		ftrace_replace_code(0);

2301 2302 2303 2304 2305 2306
	if (update && ftrace_trace_function != ftrace_ops_list_func) {
		function_trace_op = set_function_trace_op;
		smp_wmb();
		/* If irqs are disabled, we are in stop machine */
		if (!irqs_disabled())
			smp_call_function(ftrace_sync_ipi, NULL, 1);
2307 2308 2309
		err = ftrace_update_ftrace_func(ftrace_trace_function);
		if (FTRACE_WARN_ON(err))
			return;
2310
	}
2311

2312
	if (command & FTRACE_START_FUNC_RET)
2313
		err = ftrace_enable_ftrace_graph_caller();
2314
	else if (command & FTRACE_STOP_FUNC_RET)
2315 2316
		err = ftrace_disable_ftrace_graph_caller();
	FTRACE_WARN_ON(err);
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	ftrace_modify_all_code(*command);
2324

2325
	return 0;
2326 2327
}

/**
 * ftrace_run_stop_machine, go back to the stop machine method
 * @command: The command to tell ftrace what to do
 *
 * If an arch needs to fall back to the stop machine method, the
 * it can call this function.
 */
void ftrace_run_stop_machine(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

/**
 * arch_ftrace_update_code, modify the code to trace or not trace
 * @command: The command that needs to be done
 *
 * Archs can override this function if it does not need to
 * run stop_machine() to modify code.
 */
void __weak arch_ftrace_update_code(int command)
{
	ftrace_run_stop_machine(command);
}
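
/*
 * Illustrative sketch (hypothetical arch code, not part of this file): an
 * architecture that can patch its own text safely while other CPUs run may
 * override the weak hook above and skip stop_machine() entirely:
 *
 *	void arch_ftrace_update_code(int command)
 *	{
 *		ftrace_modify_all_code(command);
 *	}
 *
 * which is the general shape used by breakpoint-based implementations
 * instead of falling back to ftrace_run_stop_machine().
 */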

static void ftrace_run_update_code(int command)
2353
{
2354 2355 2356 2357 2358 2359 2360
	int ret;

	ret = ftrace_arch_code_modify_prepare();
	FTRACE_WARN_ON(ret);
	if (ret)
		return;

	/*
	 * By default we use stop_machine() to modify the code.
	 * But archs can do what ever they want as long as it
	 * is safe. The stop_machine() is the safest, but also
	 * produces the most overhead.
	 */
	arch_ftrace_update_code(command);

2369 2370
	ret = ftrace_arch_code_modify_post_process();
	FTRACE_WARN_ON(ret);
2371 2372
}

2373 2374
static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
				   struct ftrace_hash *old_hash)
2375 2376
{
	ops->flags |= FTRACE_OPS_FL_MODIFYING;
2377
	ops->old_hash.filter_hash = old_hash;
2378
	ftrace_run_update_code(command);
2379
	ops->old_hash.filter_hash = NULL;
2380 2381 2382
	ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
}

2383
static ftrace_func_t saved_ftrace_func;
2384
static int ftrace_start_up;
2385

2386 2387 2388 2389
void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
}

2390 2391 2392 2393 2394
static void control_ops_free(struct ftrace_ops *ops)
{
	free_percpu(ops->disabled);
}

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}
2407

static void ftrace_startup_all(int command)
{
	update_all_ops = true;
	ftrace_startup_enable(command);
	update_all_ops = false;
}

2415
static int ftrace_startup(struct ftrace_ops *ops, int command)
2416
{
2417
	int ret;
2418

2419
	if (unlikely(ftrace_disabled))
2420
		return -ENODEV;
2421

2422 2423 2424 2425
	ret = __register_ftrace_function(ops);
	if (ret)
		return ret;

2426
	ftrace_start_up++;
2427
	command |= FTRACE_UPDATE_CALLS;
2428

	/*
	 * Note that ftrace probes uses this to start up
	 * and modify functions it will probe. But we still
	 * set the ADDING flag for modification, as probes
	 * do not have trampolines. If they add them in the
	 * future, then the probes will need to distinguish
	 * between adding and updating probes.
	 */
	ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
2438 2439

	ftrace_hash_rec_enable(ops, 1);
2440

2441
	ftrace_startup_enable(command);
2442

2443 2444
	ops->flags &= ~FTRACE_OPS_FL_ADDING;

2445
	return 0;
2446 2447
}

2448
static int ftrace_shutdown(struct ftrace_ops *ops, int command)
2449
{
2450
	int ret;
2451

2452
	if (unlikely(ftrace_disabled))
2453 2454 2455 2456 2457
		return -ENODEV;

	ret = __unregister_ftrace_function(ops);
	if (ret)
		return ret;
2458

2459
	ftrace_start_up--;
	/*
	 * Just warn in case of unbalance, no need to kill ftrace, it's not
	 * critical but the ftrace_call callers may be never nopped again after
	 * further ftrace uses.
	 */
	WARN_ON_ONCE(ftrace_start_up < 0);

2467
	ftrace_hash_rec_disable(ops, 1);
2468

2469
	ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2470

2471
	command |= FTRACE_UPDATE_CALLS;
2472

2473 2474 2475 2476
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}
2477

	if (!command || !ftrace_enabled) {
		/*
		 * If these are control ops, they still need their
		 * per_cpu field freed. Since, function tracing is
		 * not currently active, we can just free them
		 * without synchronizing all CPUs.
		 */
		if (ops->flags & FTRACE_OPS_FL_CONTROL)
			control_ops_free(ops);
2487
		return 0;
2488
	}
2489

2490 2491 2492 2493
	/*
	 * If the ops uses a trampoline, then it needs to be
	 * tested first on update.
	 */
2494
	ops->flags |= FTRACE_OPS_FL_REMOVING;
2495 2496
	removed_ops = ops;

2497 2498 2499 2500
	/* The trampoline logic checks the old hashes */
	ops->old_hash.filter_hash = ops->func_hash->filter_hash;
	ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;

2501
	ftrace_run_update_code(command);
2502

	/*
	 * If there's no more ops registered with ftrace, run a
	 * sanity check to make sure all rec flags are cleared.
	 */
	if (ftrace_ops_list == &ftrace_list_end) {
		struct ftrace_page *pg;
		struct dyn_ftrace *rec;

		do_for_each_ftrace_rec(pg, rec) {
			if (FTRACE_WARN_ON_ONCE(rec->flags))
				pr_warn("  %pS flags:%lx\n",
					(void *)rec->ip, rec->flags);
		} while_for_each_ftrace_rec();
	}

2518 2519 2520 2521
	ops->old_hash.filter_hash = NULL;
	ops->old_hash.notrace_hash = NULL;

	removed_ops = NULL;
2522
	ops->flags &= ~FTRACE_OPS_FL_REMOVING;
2523

	/*
	 * Dynamic ops may be freed, we must make sure that all
	 * callers are done before leaving this function.
	 * The same goes for freeing the per_cpu data of the control
	 * ops.
	 *
	 * Again, normal synchronize_sched() is not good enough.
	 * We need to do a hard force of sched synchronization.
	 * This is because we use preempt_disable() to do RCU, but
	 * the function tracers can be called where RCU is not watching
	 * (like before user_exit()). We can not rely on the RCU
	 * infrastructure to do the synchronization, thus we must do it
	 * ourselves.
	 */
	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
		schedule_on_each_cpu(ftrace_sync);

2541 2542
		arch_ftrace_trampoline_free(ops);

2543 2544 2545 2546
		if (ops->flags & FTRACE_OPS_FL_CONTROL)
			control_ops_free(ops);
	}

2547
	return 0;
2548 2549
}

static void ftrace_startup_sysctl(void)
2551
{
2552 2553 2554
	if (unlikely(ftrace_disabled))
		return;

2555 2556
	/* Force update next time */
	saved_ftrace_func = NULL;
2557 2558
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
2559
		ftrace_run_update_code(FTRACE_UPDATE_CALLS);
2560 2561
}

static void ftrace_shutdown_sysctl(void)
2563
{
2564 2565 2566
	if (unlikely(ftrace_disabled))
		return;

2567 2568
	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
2569
		ftrace_run_update_code(FTRACE_DISABLE_CALLS);
2570 2571
}

2572 2573 2574
static cycle_t		ftrace_update_time;
unsigned long		ftrace_update_tot_cnt;

2575
static inline int ops_traces_mod(struct ftrace_ops *ops)
2576
{
2577 2578 2579 2580
	/*
	 * Filter_hash being empty will default to trace module.
	 * But notrace hash requires a test of individual module functions.
	 */
2581 2582
	return ftrace_hash_empty(ops->func_hash->filter_hash) &&
		ftrace_hash_empty(ops->func_hash->notrace_hash);
}

/*
 * Check if the current ops references the record.
 *
 * If the ops traces all functions, then it was already accounted for.
 * If the ops does not trace the current record function, skip it.
 * If the ops ignores the function via notrace filter, skip it.
 */
static inline bool
ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	/* If ops isn't enabled, ignore it */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return 0;

	/* If ops traces all mods, we already accounted for it */
	if (ops_traces_mod(ops))
		return 0;

	/* The function must be in the filter */
2604 2605
	if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
	    !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
2606
		return 0;
2607

2608
	/* If in notrace hash, we ignore it too */
2609
	if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
		return 0;

	return 1;
}

static int referenced_filters(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	int cnt = 0;

	for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
		if (ops_references_rec(ops, rec))
		    cnt++;
	}

	return cnt;
2626 2627
}

2628
static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
2629
{
2630
	struct ftrace_page *pg;
2631
	struct dyn_ftrace *p;
2632
	cycle_t start, stop;
2633
	unsigned long update_cnt = 0;
2634
	unsigned long ref = 0;
2635
	bool test = false;
2636
	int i;

	/*
	 * When adding a module, we need to check if tracers are
	 * currently enabled and if they are set to trace all functions.
	 * If they are, we need to enable the module functions as well
	 * as update the reference counts for those function records.
	 */
	if (mod) {
		struct ftrace_ops *ops;

		for (ops = ftrace_ops_list;
		     ops != &ftrace_list_end; ops = ops->next) {
			if (ops->flags & FTRACE_OPS_FL_ENABLED) {
				if (ops_traces_mod(ops))
					ref++;
				else
					test = true;
			}
2655 2656
		}
	}
2657

	start = ftrace_now(raw_smp_processor_id());
2659

2660
	for (pg = new_pgs; pg; pg = pg->next) {
2661

2662
		for (i = 0; i < pg->index; i++) {
2663 2664
			int cnt = ref;

2665 2666 2667
			/* If something went wrong, bail without enabling anything */
			if (unlikely(ftrace_disabled))
				return -1;
2668

2669
			p = &pg->records[i];
2670 2671 2672
			if (test)
				cnt += referenced_filters(p);
			p->flags = cnt;
2673

			/*
			 * Do the initial record conversion from mcount jump
			 * to the NOP instructions.
			 */
			if (!ftrace_code_disable(mod, p))
				break;
2680

2681
			update_cnt++;
2682

			/*
			 * If the tracing is enabled, go ahead and enable the record.
			 *
			 * The reason not to enable the record immediately is the
			 * inherent check of ftrace_make_nop/ftrace_make_call for
			 * correct previous instructions.  Making first the NOP
			 * conversion puts the module to the correct state, thus
			 * passing the ftrace_make_call check.
			 */
			if (ftrace_start_up && cnt) {
2693 2694
				int failed = __ftrace_replace_code(p, 1);
				if (failed)
2695
					ftrace_bug(failed, p);
2696
			}
2697
		}
2698 2699
	}

	stop = ftrace_now(raw_smp_processor_id());
2701
	ftrace_update_time = stop - start;
2702
	ftrace_update_tot_cnt += update_cnt;
2703

2704 2705 2706
	return 0;
}

2707
static int ftrace_allocate_records(struct ftrace_page *pg, int count)
2708
{
2709
	int order;
2710 2711
	int cnt;

2712 2713 2714 2715
	if (WARN_ON(!count))
		return -EINVAL;

	order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
2716 2717

	/*
2718 2719
	 * We want to fill as much as possible. No more than a page
	 * may be empty.
2720
	 */
2721 2722
	while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
		order--;
2723

2724 2725
 again:
	pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
2726

	if (!pg->records) {
		/* if we can't allocate this size, try something smaller */
		if (!order)
			return -ENOMEM;
		order >>= 1;
		goto again;
	}
2734

2735 2736
	cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
	pg->size = cnt;
2737

	if (cnt > count)
		cnt = count;

	return cnt;
}
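
/*
 * Worked example of the sizing logic above (illustrative numbers only:
 * assume PAGE_SIZE is 4096 and ENTRY_SIZE, i.e. sizeof(struct dyn_ftrace),
 * is 16, giving ENTRIES_PER_PAGE = 256):
 *
 *	count = 1000
 *	order = get_count_order(DIV_ROUND_UP(1000, 256)) = get_count_order(4) = 2
 *	(4096 << 2) / 16 = 1024, which is less than 1000 + 256, so order stays 2
 *	cnt = 1024, then capped to count = 1000
 *
 * leaving well under one page worth of entries unused, as required.
 */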

static struct ftrace_page *
ftrace_allocate_pages(unsigned long num_to_init)
{
	struct ftrace_page *start_pg;
	struct ftrace_page *pg;
	int order;
	int cnt;

	if (!num_to_init)
		return 0;

	start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
	if (!pg)
		return NULL;

	/*
	 * Try to allocate as much as possible in one contiguous
	 * location that fills in all of the space. We want to
	 * waste as little space as possible.
	 */
	for (;;) {
		cnt = ftrace_allocate_records(pg, num_to_init);
		if (cnt < 0)
			goto free_pages;

		num_to_init -= cnt;
		if (!num_to_init)
2771 2772
			break;

2773 2774 2775 2776
		pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
		if (!pg->next)
			goto free_pages;

2777 2778 2779
		pg = pg->next;
	}

2780 2781 2782
	return start_pg;

 free_pages:
2783 2784
	pg = start_pg;
	while (pg) {
		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
		free_pages((unsigned long)pg->records, order);
		start_pg = pg->next;
		kfree(pg);
		pg = start_pg;
	}
	pr_info("ftrace: FAILED to allocate memory for functions\n");
	return NULL;
}

2795 2796 2797
#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
2798
	loff_t				pos;
2799 2800 2801 2802 2803
	loff_t				func_pos;
	struct ftrace_page		*pg;
	struct dyn_ftrace		*func;
	struct ftrace_func_probe	*probe;
	struct trace_parser		parser;
2804
	struct ftrace_hash		*hash;
2805
	struct ftrace_ops		*ops;
2806 2807 2808
	int				hidx;
	int				idx;
	unsigned			flags;
2809 2810
};

2811
static void *
2812
t_hash_next(struct seq_file *m, loff_t *pos)
2813 2814
{
	struct ftrace_iterator *iter = m->private;
2815
	struct hlist_node *hnd = NULL;
2816 2817 2818
	struct hlist_head *hhd;

	(*pos)++;
2819
	iter->pos = *pos;
2820

2821 2822
	if (iter->probe)
		hnd = &iter->probe->node;
 retry:
	if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
		return NULL;

	hhd = &ftrace_func_hash[iter->hidx];

	if (hlist_empty(hhd)) {
		iter->hidx++;
		hnd = NULL;
		goto retry;
	}

	if (!hnd)
		hnd = hhd->first;
	else {
		hnd = hnd->next;
		if (!hnd) {
			iter->hidx++;
			goto retry;
		}
	}

	if (WARN_ON_ONCE(!hnd))
		return NULL;

	iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);

	return iter;
}

static void *t_hash_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l;

2859 2860 2861
	if (!(iter->flags & FTRACE_ITER_DO_HASH))
		return NULL;

2862 2863
	if (iter->func_pos > *pos)
		return NULL;
2864

	iter->hidx = 0;
2866
	for (l = 0; l <= (*pos - iter->func_pos); ) {
2867
		p = t_hash_next(m, &l);
		if (!p)
			break;
	}
2871 2872 2873
	if (!p)
		return NULL;

2874 2875 2876
	/* Only set this if we have an item */
	iter->flags |= FTRACE_ITER_HASH;

2877
	return iter;
2878 2879
}

2880 2881
static int
t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
2882
{
	struct ftrace_func_probe *rec;
2884

2885 2886 2887
	rec = iter->probe;
	if (WARN_ON_ONCE(!rec))
		return -EIO;
2888

2889 2890 2891
	if (rec->ops->print)
		return rec->ops->print(m, rec->ip, rec->ops, rec->data);

2892
	seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);

	if (rec->data)
		seq_printf(m, ":%p", rec->data);
	seq_putc(m, '\n');

	return 0;
}

static void *
2902 2903 2904
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
2905
	struct ftrace_ops *ops = iter->ops;
2906 2907
	struct dyn_ftrace *rec = NULL;

2908 2909 2910
	if (unlikely(ftrace_disabled))
		return NULL;

2911
	if (iter->flags & FTRACE_ITER_HASH)
2912
		return t_hash_next(m, pos);
2913

2914
	(*pos)++;
2915
	iter->pos = iter->func_pos = *pos;
2916

2917
	if (iter->flags & FTRACE_ITER_PRINTALL)
2918
		return t_hash_start(m, pos);
2919

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
2929
		if (((iter->flags & FTRACE_ITER_FILTER) &&
2930
		     !(ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))) ||

2932
		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
2933
		     !ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) ||
2934 2935

		    ((iter->flags & FTRACE_ITER_ENABLED) &&
2936
		     !(rec->flags & FTRACE_FL_ENABLED))) {
2937

2938 2939 2940 2941 2942
			rec = NULL;
			goto retry;
		}
	}

2943
	if (!rec)
2944
		return t_hash_start(m, pos);
2945 2946 2947 2948

	iter->func = rec;

	return iter;
2949 2950
}

2951 2952 2953 2954
static void reset_iter_read(struct ftrace_iterator *iter)
{
	iter->pos = 0;
	iter->func_pos = 0;
2955
	iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
2956 2957 2958 2959 2960
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
2961
	struct ftrace_ops *ops = iter->ops;
2962
	void *p = NULL;
2963
	loff_t l;
2964

2965
	mutex_lock(&ftrace_lock);
2966 2967 2968 2969

	if (unlikely(ftrace_disabled))
		return NULL;

	/*
	 * If an lseek was done, then reset and start from beginning.
	 */
	if (*pos < iter->pos)
		reset_iter_read(iter);

2976 2977 2978 2979 2980
	/*
	 * For set_ftrace_filter reading, if we have the filter
	 * off, we can short cut and just print out that all
	 * functions are enabled.
	 */
2981
	if ((iter->flags & FTRACE_ITER_FILTER &&
2982
	     ftrace_hash_empty(ops->func_hash->filter_hash)) ||
2983
	    (iter->flags & FTRACE_ITER_NOTRACE &&
2984
	     ftrace_hash_empty(ops->func_hash->notrace_hash))) {
2985
		if (*pos > 0)
2986
			return t_hash_start(m, pos);
2987
		iter->flags |= FTRACE_ITER_PRINTALL;
2988 2989
		/* reset in case of seek/pread */
		iter->flags &= ~FTRACE_ITER_HASH;
2990 2991 2992
		return iter;
	}

2993 2994 2995
	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_start(m, pos);

2996 2997 2998 2999 3000
	/*
	 * Unfortunately, we need to restart at ftrace_pages_start
	 * every time we let go of the ftrace_mutex. This is because
	 * those pointers can change without the lock.
	 */
	iter->pg = ftrace_pages_start;
	iter->idx = 0;
	for (l = 0; l <= *pos; ) {
		p = t_next(m, p, &l);
		if (!p)
			break;
3007
	}
3008

3009 3010
	if (!p)
		return t_hash_start(m, pos);
3011 3012

	return iter;
3013 3014 3015 3016
}

static void t_stop(struct seq_file *m, void *p)
{
3017
	mutex_unlock(&ftrace_lock);
3018 3019
}

void * __weak
arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	return NULL;
}

static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
				struct dyn_ftrace *rec)
{
	void *ptr;

	ptr = arch_ftrace_trampoline_func(ops, rec);
	if (ptr)
		seq_printf(m, " ->%pS", ptr);
}

3036 3037
static int t_show(struct seq_file *m, void *v)
{
3038
	struct ftrace_iterator *iter = m->private;
3039
	struct dyn_ftrace *rec;
3040

3041
	if (iter->flags & FTRACE_ITER_HASH)
3042
		return t_hash_show(m, iter);
3043

3044
	if (iter->flags & FTRACE_ITER_PRINTALL) {
3045
		if (iter->flags & FTRACE_ITER_NOTRACE)
3046
			seq_puts(m, "#### no functions disabled ####\n");
3047
		else
3048
			seq_puts(m, "#### all functions enabled ####\n");
3049 3050 3051
		return 0;
	}

3052 3053
	rec = iter->func;

3054 3055 3056
	if (!rec)
		return 0;

3057
	seq_printf(m, "%ps", (void *)rec->ip);
3058
	if (iter->flags & FTRACE_ITER_ENABLED) {
3059 3060
		struct ftrace_ops *ops = NULL;

3061
		seq_printf(m, " (%ld)%s",
3062
			   ftrace_rec_count(rec),
3063 3064
			   rec->flags & FTRACE_FL_REGS ? " R" : "  ");
		if (rec->flags & FTRACE_FL_TRAMP_EN) {
3065
			ops = ftrace_find_tramp_ops_any(rec);
3066
			if (ops)
3067 3068 3069
				seq_printf(m, "\ttramp: %pS",
					   (void *)ops->trampoline);
			else
3070
				seq_puts(m, "\ttramp: ERROR!");
3071

3072
		}
3073
		add_trampoline_func(m, ops, rec);
	}

	seq_putc(m, '\n');
3077 3078 3079 3080

	return 0;
}

static const struct seq_operations show_ftrace_seq_ops = {
3082 3083 3084 3085 3086 3087
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
3089 3090 3091 3092
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;

3093 3094 3095
	if (unlikely(ftrace_disabled))
		return -ENODEV;

3096 3097 3098 3099
	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
	if (iter) {
		iter->pg = ftrace_pages_start;
		iter->ops = &global_ops;
	}
3101

3102
	return iter ? 0 : -ENOMEM;
3103 3104
}

3105 3106 3107 3108 3109
static int
ftrace_enabled_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;

3110 3111 3112 3113 3114
	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
	if (iter) {
		iter->pg = ftrace_pages_start;
		iter->flags = FTRACE_ITER_ENABLED;
		iter->ops = &global_ops;
3115 3116
	}

3117
	return iter ? 0 : -ENOMEM;
3118 3119
}

/**
 * ftrace_regex_open - initialize function tracer filter files
 * @ops: The ftrace_ops that hold the hash filters
 * @flag: The type of filter to process
 * @inode: The inode, usually passed in to your open routine
 * @file: The file, usually passed in to your open routine
 *
 * ftrace_regex_open() initializes the filter files for the
 * @ops. Depending on @flag it may process the filter hash or
 * the notrace hash of @ops. With this called from the open
 * routine, you can use ftrace_filter_write() for the write
 * routine if @flag has FTRACE_ITER_FILTER set, or
 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
3133
 * tracing_lseek() should be used as the lseek routine, and
3134 3135 3136
 * release must call ftrace_regex_release().
 */
int
3137
ftrace_regex_open(struct ftrace_ops *ops, int flag,
3138
		  struct inode *inode, struct file *file)
3139 3140
{
	struct ftrace_iterator *iter;
3141
	struct ftrace_hash *hash;
3142 3143
	int ret = 0;

3144 3145
	ftrace_ops_init(ops);

3146 3147 3148
	if (unlikely(ftrace_disabled))
		return -ENODEV;

3149 3150 3151 3152
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

3153 3154 3155 3156 3157
	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
		kfree(iter);
		return -ENOMEM;
	}

3158 3159 3160
	iter->ops = ops;
	iter->flags = flag;

3161
	mutex_lock(&ops->func_hash->regex_lock);
3162

3163
	if (flag & FTRACE_ITER_NOTRACE)
3164
		hash = ops->func_hash->notrace_hash;
3165
	else
3166
		hash = ops->func_hash->filter_hash;
3167

3168
	if (file->f_mode & FMODE_WRITE) {
		const int size_bits = FTRACE_HASH_DEFAULT_BITS;

		if (file->f_flags & O_TRUNC)
			iter->hash = alloc_ftrace_hash(size_bits);
		else
			iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);

3176 3177 3178
		if (!iter->hash) {
			trace_parser_put(&iter->parser);
			kfree(iter);
3179 3180
			ret = -ENOMEM;
			goto out_unlock;
3181 3182
		}
	}
3183

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
3191
		} else {
3192 3193
			/* Failed */
			free_ftrace_hash(iter->hash);
3194
			trace_parser_put(&iter->parser);
3195
			kfree(iter);
3196
		}
3197 3198
	} else
		file->private_data = iter;
3199 3200

 out_unlock:
3201
	mutex_unlock(&ops->func_hash->regex_lock);
3202 3203 3204 3205

	return ret;
}

3206 3207 3208
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
3209 3210 3211
	struct ftrace_ops *ops = inode->i_private;

	return ftrace_regex_open(ops,
3212 3213
			FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
			inode, file);
3214 3215 3216 3217 3218
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
3219 3220 3221
	struct ftrace_ops *ops = inode->i_private;

	return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
3222
				 inode, file);
3223 3224
}

3225
static int ftrace_match(char *str, char *regex, int len, int type)
3226 3227
{
	int matched = 0;
3228
	int slen;

	switch (type) {
	case MATCH_FULL:
		if (strcmp(str, regex) == 0)
			matched = 1;
		break;
	case MATCH_FRONT_ONLY:
		if (strncmp(str, regex, len) == 0)
			matched = 1;
		break;
	case MATCH_MIDDLE_ONLY:
		if (strstr(str, regex))
			matched = 1;
		break;
	case MATCH_END_ONLY:
3244 3245
		slen = strlen(str);
		if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
			matched = 1;
		break;
	}

	return matched;
}
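
/*
 * Illustrative examples of the match types handled above (the "regex" is
 * really a glob, classified by filter_parse_regex()):
 *
 *	"schedule"	MATCH_FULL		exact function name
 *	"sched_*"	MATCH_FRONT_ONLY	prefix match
 *	"*idle*"	MATCH_MIDDLE_ONLY	substring match
 *	"*_unlock"	MATCH_END_ONLY		suffix match
 */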

static int
3254
enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
3255
{
3256 3257 3258
	struct ftrace_func_entry *entry;
	int ret = 0;

3259 3260 3261 3262 3263
	entry = ftrace_lookup_ip(hash, rec->ip);
	if (not) {
		/* Do nothing if it doesn't exist */
		if (!entry)
			return 0;
3264

3265
		free_hash_entry(hash, entry);
3266 3267 3268 3269
	} else {
		/* Do nothing if it exists */
		if (entry)
			return 0;
3270

3271
		ret = add_hash_entry(hash, rec->ip);
3272 3273
	}
	return ret;
3274 3275
}

3276
static int
3277 3278
ftrace_match_record(struct dyn_ftrace *rec, char *mod,
		    char *regex, int len, int type)
3279 3280
{
	char str[KSYM_SYMBOL_LEN];
	char *modname;

	kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);

	if (mod) {
		/* module lookup requires matching the module */
		if (!modname || strcmp(modname, mod))
			return 0;

		/* blank search means to match all funcs in the mod */
		if (!len)
			return 1;
	}
3294 3295 3296 3297

	return ftrace_match(str, regex, len, type);
}

3298 3299 3300
static int
match_records(struct ftrace_hash *hash, char *buff,
	      int len, char *mod, int not)
3301
{
3302
	unsigned search_len = 0;
3303 3304
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
3305 3306
	int type = MATCH_FULL;
	char *search = buff;
3307
	int found = 0;
3308
	int ret;
3309

3310 3311 3312 3313
	if (len) {
		type = filter_parse_regex(buff, len, &search, &not);
		search_len = strlen(search);
	}
3314

3315
	mutex_lock(&ftrace_lock);
3316

3317 3318
	if (unlikely(ftrace_disabled))
		goto out_unlock;
3319

3320
	do_for_each_ftrace_rec(pg, rec) {
3321
		if (ftrace_match_record(rec, mod, search, search_len, type)) {
3322
			ret = enter_record(hash, rec, not);
3323 3324 3325 3326
			if (ret < 0) {
				found = ret;
				goto out_unlock;
			}
3327
			found = 1;
3328 3329
		}
	} while_for_each_ftrace_rec();
3330
 out_unlock:
3331
	mutex_unlock(&ftrace_lock);
3332 3333

	return found;
3334 3335
}

3336
static int
3337
ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
3338
{
3339
	return match_records(hash, buff, len, NULL, 0);
3340 3341
}

3342 3343
static int
ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
3344 3345
{
	int not = 0;

	/* blank or '*' mean the same */
	if (strcmp(buff, "*") == 0)
		buff[0] = 0;

	/* handle the case of 'dont filter this module' */
	if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
		buff[0] = 0;
		not = 1;
	}

3357
	return match_records(hash, buff, strlen(buff), mod, not);
3358 3359
}

/*
 * We register the module command as a template to show others how
 * to register a command as well.
 */

static int
3366 3367
ftrace_mod_callback(struct ftrace_hash *hash,
		    char *func, char *cmd, char *param, int enable)
3368 3369
{
	char *mod;
3370
	int ret = -EINVAL;

	/*
	 * cmd == 'mod' because we only registered this func
	 * for the 'mod' ftrace_func_command.
	 * But if you register one func with multiple commands,
	 * you can tell which command was used by the cmd
	 * parameter.
	 */

	/* we must have a module name */
	if (!param)
3382
		return ret;
3383 3384 3385

	mod = strsep(&param, ":");
	if (!strlen(mod))
3386
		return ret;
3387

3388
	ret = ftrace_match_module_records(hash, func, mod);
3389 3390 3391 3392 3393 3394
	if (!ret)
		ret = -EINVAL;
	if (ret < 0)
		return ret;

	return 0;
}

static struct ftrace_func_command ftrace_mod_cmd = {
	.name			= "mod",
	.func			= ftrace_mod_callback,
};

static int __init ftrace_mod_cmd_init(void)
{
	return register_ftrace_command(&ftrace_mod_cmd);
}
3406
core_initcall(ftrace_mod_cmd_init);
3407

3408
static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
3409
				      struct ftrace_ops *op, struct pt_regs *pt_regs)
3410
{
	struct ftrace_func_probe *entry;
	struct hlist_head *hhd;
	unsigned long key;

	key = hash_long(ip, FTRACE_HASH_BITS);

	hhd = &ftrace_func_hash[key];

	if (hlist_empty(hhd))
		return;

	/*
	 * Disable preemption for these calls to prevent a RCU grace
	 * period. This syncs the hash iteration and freeing of items
	 * on the hash. rcu_read_lock is too dangerous here.
	 */
3427
	preempt_disable_notrace();
3428
	hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
3429 3430 3431
		if (entry->ip == ip)
			entry->ops->func(ip, parent_ip, &entry->data);
	}
3432
	preempt_enable_notrace();
3433 3434
}

static struct ftrace_ops trace_probe_ops __read_mostly =
3436
{
3437
	.func		= function_trace_probe_call,
3438
	.flags		= FTRACE_OPS_FL_INITIALIZED,
3439
	INIT_OPS_HASH(trace_probe_ops)
3440 3441
};

static int ftrace_probe_registered;
3443

3444
static void __enable_ftrace_function_probe(struct ftrace_hash *old_hash)
3445
{
3446
	int ret;
3447 3448
	int i;

3449 3450 3451
	if (ftrace_probe_registered) {
		/* still need to update the function call sites */
		if (ftrace_enabled)
3452 3453
			ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
					       old_hash);
3454
		return;
3455
	}

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			break;
	}
	/* Nothing registered? */
	if (i == FTRACE_FUNC_HASHSIZE)
		return;

3466
	ret = ftrace_startup(&trace_probe_ops, 0);
3467

	ftrace_probe_registered = 1;
3469 3470
}

static void __disable_ftrace_function_probe(void)
3472 3473 3474
{
	int i;

	if (!ftrace_probe_registered)
		return;

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			return;
	}

	/* no more funcs left */
3485
	ftrace_shutdown(&trace_probe_ops, 0);
3486

	ftrace_probe_registered = 0;
3488 3489 3490
}


3491
static void ftrace_free_entry(struct ftrace_func_probe *entry)
3492 3493
{
	if (entry->ops->free)
3494
		entry->ops->free(entry->ops, entry->ip, &entry->data);
3495 3496 3497 3498
	kfree(entry);
}

int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3500 3501
			      void *data)
{
	struct ftrace_func_probe *entry;
3503
	struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
3504
	struct ftrace_hash *old_hash = *orig_hash;
3505
	struct ftrace_hash *hash;
3506 3507 3508
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type, len, not;
	unsigned long key;
3510 3511
	int count = 0;
	char *search;
3512
	int ret;
3513

3514
	type = filter_parse_regex(glob, strlen(glob), &search, &not);
3515 3516
	len = strlen(search);

	/* we do not support '!' for function probes */
3518 3519 3520
	if (WARN_ON(not))
		return -EINVAL;

3521
	mutex_lock(&trace_probe_ops.func_hash->regex_lock);
3522

3523
	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
3524 3525
	if (!hash) {
		count = -ENOMEM;
3526
		goto out;
3527 3528 3529 3530
	}

	if (unlikely(ftrace_disabled)) {
		count = -ENODEV;
3531
		goto out;
3532
	}
3533

3534 3535
	mutex_lock(&ftrace_lock);

3536
	do_for_each_ftrace_rec(pg, rec) {
3537

3538
		if (!ftrace_match_record(rec, NULL, search, len, type))
3539 3540 3541 3542
			continue;

		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			/* If we did not process any, then return error */
			if (!count)
				count = -ENOMEM;
			goto out_unlock;
		}

		count++;

		entry->data = data;

		/*
		 * The caller might want to do something special
		 * for each function we find. We call the callback
		 * to give the caller an opportunity to do so.
		 */
3558 3559
		if (ops->init) {
			if (ops->init(ops, rec->ip, &entry->data) < 0) {
				/* caller does not like this func */
				kfree(entry);
				continue;
			}
		}

		ret = enter_record(hash, rec, 0);
		if (ret < 0) {
			kfree(entry);
			count = ret;
			goto out_unlock;
		}

		entry->ops = ops;
		entry->ip = rec->ip;

		key = hash_long(entry->ip, FTRACE_HASH_BITS);
		hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);

	} while_for_each_ftrace_rec();
3580 3581

	ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3582 3583 3584

	__enable_ftrace_function_probe(old_hash);

3585 3586 3587
	if (!ret)
		free_ftrace_hash_rcu(old_hash);
	else
3588 3589
		count = ret;

3590
 out_unlock:
3591 3592
	mutex_unlock(&ftrace_lock);
 out:
3593
	mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
3594
	free_ftrace_hash(hash);
3595 3596 3597 3598 3599

	return count;
}

enum {
	PROBE_TEST_FUNC		= 1,
	PROBE_TEST_DATA		= 2
3602 3603 3604
};

static void
__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3606 3607
				  void *data, int flags)
{
3608
	struct ftrace_func_entry *rec_entry;
	struct ftrace_func_probe *entry;
3610
	struct ftrace_func_probe *p;
3611
	struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
3612
	struct ftrace_hash *old_hash = *orig_hash;
3613
	struct list_head free_list;
3614
	struct ftrace_hash *hash;
3615
	struct hlist_node *tmp;
3616 3617 3618 3619
	char str[KSYM_SYMBOL_LEN];
	int type = MATCH_FULL;
	int i, len = 0;
	char *search;
3620
	int ret;
3621

3622
	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
3623
		glob = NULL;
3624
	else if (glob) {
3625 3626
		int not;

3627
		type = filter_parse_regex(glob, strlen(glob), &search, &not);
3628 3629
		len = strlen(search);

		/* we do not support '!' for function probes */
3631 3632 3633 3634
		if (WARN_ON(not))
			return;
	}

3635
	mutex_lock(&trace_probe_ops.func_hash->regex_lock);

	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
	if (!hash)
		/* Hmm, should report this somehow */
		goto out_unlock;

3642 3643
	INIT_LIST_HEAD(&free_list);

3644 3645 3646
	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];

3647
		hlist_for_each_entry_safe(entry, tmp, hhd, node) {
3648 3649

			/* break up if statements for readability */
			if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
3651 3652
				continue;

			if ((flags & PROBE_TEST_DATA) && entry->data != data)
				continue;

			/* do this last, since it is the most expensive */
			if (glob) {
				kallsyms_lookup(entry->ip, NULL, NULL,
						NULL, str);
				if (!ftrace_match(str, glob, len, type))
					continue;
			}

3664 3665 3666 3667 3668
			rec_entry = ftrace_lookup_ip(hash, entry->ip);
			/* It is possible more than one entry had this ip */
			if (rec_entry)
				free_hash_entry(hash, rec_entry);

3669
			hlist_del_rcu(&entry->node);
3670
			list_add(&entry->free_list, &free_list);
3671 3672
		}
	}
3673
	mutex_lock(&ftrace_lock);
	__disable_ftrace_function_probe();
3675 3676 3677 3678
	/*
	 * Remove after the disable is called. Otherwise, if the last
	 * probe is removed, a null hash means *all enabled*.
	 */
3679
	ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3680
	synchronize_sched();
3681 3682 3683
	if (!ret)
		free_ftrace_hash_rcu(old_hash);

3684 3685 3686 3687
	list_for_each_entry_safe(entry, p, &free_list, free_list) {
		list_del(&entry->free_list);
		ftrace_free_entry(entry);
	}
3688
	mutex_unlock(&ftrace_lock);

 out_unlock:
3691
	mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
3692
	free_ftrace_hash(hash);
3693 3694 3695
}

void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3697 3698
				void *data)
{
	__unregister_ftrace_function_probe(glob, ops, data,
					  PROBE_TEST_FUNC | PROBE_TEST_DATA);
3701 3702 3703
}

void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
3705
{
	__unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
3707 3708
}

void unregister_ftrace_function_probe_all(char *glob)
3710
{
	__unregister_ftrace_function_probe(glob, NULL, NULL, 0);
3712 3713
}

3714 3715 3716
static LIST_HEAD(ftrace_commands);
static DEFINE_MUTEX(ftrace_cmd_mutex);

3717 3718 3719 3720 3721
/*
 * Currently we only register ftrace commands from __init, so mark this
 * __init too.
 */
__init int register_ftrace_command(struct ftrace_func_command *cmd)
{
	struct ftrace_func_command *p;
	int ret = 0;

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}
	list_add(&cmd->list, &ftrace_commands);
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}

3740 3741 3742 3743 3744
/*
 * Currently we only unregister ftrace commands from __init, so mark
 * this __init too.
 */
__init int unregister_ftrace_command(struct ftrace_func_command *cmd)
{
	struct ftrace_func_command *p, *n;
	int ret = -ENODEV;

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry_safe(p, n, &ftrace_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = 0;
			list_del_init(&p->list);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}

3763 3764
static int ftrace_process_regex(struct ftrace_hash *hash,
				char *buff, int len, int enable)
3765
{
3766
	char *func, *command, *next = buff;
	struct ftrace_func_command *p;
3768
	int ret = -EINVAL;
3769 3770 3771 3772

	func = strsep(&next, ":");

	if (!next) {
3773
		ret = ftrace_match_records(hash, func, len);
3774 3775 3776 3777 3778
		if (!ret)
			ret = -EINVAL;
		if (ret < 0)
			return ret;
		return 0;
3779 3780
	}

3781
	/* command found */
3782 3783 3784

	command = strsep(&next, ":");

3785 3786 3787
	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(p->name, command) == 0) {
3788
			ret = p->func(hash, func, command, next, enable);
3789 3790
			goto out_unlock;
		}
3791
	}
3792 3793
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);
3794

3795
	return ret;
3796 3797
}
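
/*
 * Illustrative usage of the func[:command[:param]] syntax parsed above, as
 * written to the set_ftrace_filter file in the tracing directory:
 *
 *	echo 'schedule' > set_ftrace_filter          # plain filter, no command
 *	echo '*:mod:ext4' >> set_ftrace_filter       # the "mod" command registered above
 *	echo '!schedule' >> set_ftrace_filter        # remove an entry again
 *
 * The available commands depend on what has been registered with
 * register_ftrace_command().
 */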

static ssize_t
3799 3800
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
3801 3802
{
	struct ftrace_iterator *iter;
3803 3804
	struct trace_parser *parser;
	ssize_t ret, read;
3805

3806
	if (!cnt)
		return 0;

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

3815
	if (unlikely(ftrace_disabled))
3816 3817 3818
		return -ENODEV;

	/* iter->hash is a local copy, so we don't need regex_lock */
3819

3820 3821
	parser = &iter->parser;
	read = trace_get_user(parser, ubuf, cnt, ppos);
3822

3823
	if (read >= 0 && trace_parser_loaded(parser) &&
3824
	    !trace_parser_cont(parser)) {
3825
		ret = ftrace_process_regex(iter->hash, parser->buffer,
3826
					   parser->idx, enable);
3827
		trace_parser_clear(parser);
3828
		if (ret < 0)
3829
			goto out;
3830
	}
3831 3832

	ret = read;
3833
 out:
3834 3835 3836
	return ret;
}

3837
ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

3844
ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

3851
static int
ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
{
	struct ftrace_func_entry *entry;

	if (!ftrace_location(ip))
		return -EINVAL;

	if (remove) {
		entry = ftrace_lookup_ip(hash, ip);
		if (!entry)
			return -ENOENT;
		free_hash_entry(hash, entry);
		return 0;
	}

	return add_hash_entry(hash, ip);
}

3870 3871
static void ftrace_ops_update_code(struct ftrace_ops *ops,
				   struct ftrace_hash *old_hash)
3872 3873
{
	if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
3874
		ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
3875 3876
}

3877 3878 3879
static int
ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
		unsigned long ip, int remove, int reset, int enable)
3880
{
3881
	struct ftrace_hash **orig_hash;
3882
	struct ftrace_hash *old_hash;
3883
	struct ftrace_hash *hash;
3884
	int ret;
3885

3886
	if (unlikely(ftrace_disabled))
3887
		return -ENODEV;
3888

3889
	mutex_lock(&ops->func_hash->regex_lock);
3890

3891
	if (enable)
3892
		orig_hash = &ops->func_hash->filter_hash;
3893
	else
3894
		orig_hash = &ops->func_hash->notrace_hash;
3895

3896 3897 3898 3899 3900
	if (reset)
		hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
	else
		hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);

3901 3902 3903 3904
	if (!hash) {
		ret = -ENOMEM;
		goto out_regex_unlock;
	}
3905

3906 3907 3908 3909
	if (buf && !ftrace_match_records(hash, buf, len)) {
		ret = -EINVAL;
		goto out_regex_unlock;
	}
3910 3911 3912 3913 3914
	if (ip) {
		ret = ftrace_match_addr(hash, ip, remove);
		if (ret < 0)
			goto out_regex_unlock;
	}
3915 3916

	mutex_lock(&ftrace_lock);
3917
	old_hash = *orig_hash;
3918
	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3919
	if (!ret) {
3920
		ftrace_ops_update_code(ops, old_hash);
3921 3922
		free_ftrace_hash_rcu(old_hash);
	}
3923 3924
	mutex_unlock(&ftrace_lock);

3925
 out_regex_unlock:
3926
	mutex_unlock(&ops->func_hash->regex_lock);
3927 3928 3929

	free_ftrace_hash(hash);
	return ret;
3930 3931
}

static int
ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
		int reset, int enable)
{
	return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
}

/**
 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
 * @ops - the ops to set the filter with
 * @ip - the address to add to or remove from the filter.
 * @remove - non zero to remove the ip from the filter
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @ip is NULL, it fails to update the filter.
 */
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
			 int remove, int reset)
{
	ftrace_ops_init(ops);
	return ftrace_set_addr(ops, ip, remove, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
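
/*
 * Illustrative sketch of filtering by address (my_ops and my_callback are
 * hypothetical, and error handling is omitted):
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_callback,
 *	};
 *
 *	ftrace_set_filter_ip(&my_ops, (unsigned long)wake_up_process, 0, 1);
 *	register_ftrace_function(&my_ops);
 *
 * Here reset=1 drops any previous filter, so only wake_up_process is traced
 * by my_ops once it is registered.
 */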

static int
ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
		 int reset, int enable)
{
	return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
}

3964 3965
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @ops - the ops to set the filter with
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
3974
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
3975 3976
		       int len, int reset)
{
3977
	ftrace_ops_init(ops);
3978
	return ftrace_set_regex(ops, buf, len, reset, 1);
3979 3980 3981 3982 3983 3984 3985 3986 3987 3988 3989 3990 3991 3992
}
EXPORT_SYMBOL_GPL(ftrace_set_filter);

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @ops - the ops to set the notrace filter with
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
3993
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
3994 3995
			int len, int reset)
{
3996
	ftrace_ops_init(ops);
3997
	return ftrace_set_regex(ops, buf, len, reset, 0);
3998 3999 4000
}
EXPORT_SYMBOL_GPL(ftrace_set_notrace);
/**
4001
 * ftrace_set_global_filter - set a function to filter on with global tracers
4002 4003 4004 4005 4006 4007 4008
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
4009
void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
4010
{
4011
	ftrace_set_regex(&global_ops, buf, len, reset, 1);
4012
}
4013
EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
4014

4015
/**
4016
 * ftrace_set_global_notrace - set a function to not trace with global tracers
4017 4018 4019 4020 4021 4022 4023 4024
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
4025
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
4026
{
4027
	ftrace_set_regex(&global_ops, buf, len, reset, 0);
4028
}
4029
EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
4030

4031 4032 4033 4034 4035 4036 4037
/*
 * command line interface to allow users to set filters on boot up.
 */
#define FTRACE_FILTER_SIZE		COMMAND_LINE_SIZE
static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;

4038 4039 4040
/* Used by function selftest to not test if filter is set */
bool ftrace_filter_param __initdata;

4041 4042
static int __init set_ftrace_notrace(char *str)
{
4043
	ftrace_filter_param = true;
4044
	strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
4045 4046 4047 4048 4049 4050
	return 1;
}
__setup("ftrace_notrace=", set_ftrace_notrace);

static int __init set_ftrace_filter(char *str)
{
4051
	ftrace_filter_param = true;
4052
	strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
4053 4054 4055 4056
	return 1;
}
__setup("ftrace_filter=", set_ftrace_filter);

4057
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4058
static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
4059
static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
4060
static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);
4061

4062 4063 4064
static unsigned long save_global_trampoline;
static unsigned long save_global_flags;

4065 4066
static int __init set_graph_function(char *str)
{
4067
	strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
4068 4069 4070 4071
	return 1;
}
__setup("ftrace_graph_filter=", set_graph_function);

4072 4073 4074 4075 4076 4077 4078 4079
static int __init set_graph_notrace_function(char *str)
{
	strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_graph_notrace=", set_graph_notrace_function);

static void __init set_ftrace_early_graph(char *buf, int enable)
4080 4081 4082
{
	int ret;
	char *func;
4083 4084 4085 4086 4087 4088 4089
	unsigned long *table = ftrace_graph_funcs;
	int *count = &ftrace_graph_count;

	if (!enable) {
		table = ftrace_graph_notrace_funcs;
		count = &ftrace_graph_notrace_count;
	}
4090 4091 4092 4093

	while (buf) {
		func = strsep(&buf, ",");
		/* we allow only one expression at a time */
4094
		ret = ftrace_set_func(table, count, FTRACE_GRAPH_MAX_FUNCS, func);
4095 4096 4097 4098 4099 4100 4101
		if (ret)
			printk(KERN_DEBUG "ftrace: function %s not "
					  "traceable\n", func);
	}
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

4102 4103
void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
4104 4105 4106
{
	char *func;

4107 4108
	ftrace_ops_init(ops);

4109 4110
	while (buf) {
		func = strsep(&buf, ",");
4111
		ftrace_set_regex(ops, func, strlen(func), 0, enable);
4112 4113 4114 4115 4116 4117
	}
}

static void __init set_ftrace_early_filters(void)
{
	if (ftrace_filter_buf[0])
4118
		ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
4119
	if (ftrace_notrace_buf[0])
4120
		ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
4121 4122
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (ftrace_graph_buf[0])
4123 4124 4125
		set_ftrace_early_graph(ftrace_graph_buf, 1);
	if (ftrace_graph_notrace_buf[0])
		set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
4126
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4127 4128
}

4129
int ftrace_regex_release(struct inode *inode, struct file *file)
4130 4131 4132
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;
4133
	struct ftrace_hash **orig_hash;
4134
	struct ftrace_hash *old_hash;
4135
	struct trace_parser *parser;
4136
	int filter_hash;
4137
	int ret;
4138 4139 4140 4141 4142 4143 4144

	if (file->f_mode & FMODE_READ) {
		iter = m->private;
		seq_release(inode, file);
	} else
		iter = file->private_data;

4145 4146 4147
	parser = &iter->parser;
	if (trace_parser_loaded(parser)) {
		parser->buffer[parser->idx] = 0;
4148
		ftrace_match_records(iter->hash, parser->buffer, parser->idx);
4149 4150
	}

4151 4152
	trace_parser_put(parser);

4153
	mutex_lock(&iter->ops->func_hash->regex_lock);
4154

4155
	if (file->f_mode & FMODE_WRITE) {
4156 4157 4158
		filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);

		if (filter_hash)
4159
			orig_hash = &iter->ops->func_hash->filter_hash;
4160
		else
4161
			orig_hash = &iter->ops->func_hash->notrace_hash;
4162

4163
		mutex_lock(&ftrace_lock);
4164
		old_hash = *orig_hash;
4165 4166
		ret = ftrace_hash_move(iter->ops, filter_hash,
				       orig_hash, iter->hash);
4167
		if (!ret) {
4168
			ftrace_ops_update_code(iter->ops, old_hash);
4169 4170
			free_ftrace_hash_rcu(old_hash);
		}
4171 4172
		mutex_unlock(&ftrace_lock);
	}
4173

4174
	mutex_unlock(&iter->ops->func_hash->regex_lock);
4175 4176
	free_ftrace_hash(iter->hash);
	kfree(iter);
4177

4178 4179 4180
	return 0;
}

4181
static const struct file_operations ftrace_avail_fops = {
4182 4183 4184
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
L
Li Zefan 已提交
4185
	.release = seq_release_private,
4186 4187
};

4188 4189 4190 4191 4192 4193 4194
static const struct file_operations ftrace_enabled_fops = {
	.open = ftrace_enabled_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

4195
static const struct file_operations ftrace_filter_fops = {
4196
	.open = ftrace_filter_open,
L
Lai Jiangshan 已提交
4197
	.read = seq_read,
4198
	.write = ftrace_filter_write,
4199
	.llseek = tracing_lseek,
4200
	.release = ftrace_regex_release,
4201 4202
};

4203
static const struct file_operations ftrace_notrace_fops = {
4204
	.open = ftrace_notrace_open,
L
Lai Jiangshan 已提交
4205
	.read = seq_read,
4206
	.write = ftrace_notrace_write,
4207
	.llseek = tracing_lseek,
4208
	.release = ftrace_regex_release,
4209 4210
};

4211 4212 4213 4214 4215
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static DEFINE_MUTEX(graph_lock);

int ftrace_graph_count;
4216
int ftrace_graph_notrace_count;
4217
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
4218
unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
4219

4220 4221 4222 4223 4224 4225 4226
struct ftrace_graph_data {
	unsigned long *table;
	size_t size;
	int *count;
	const struct seq_operations *seq_ops;
};

4227
static void *
4228
__g_next(struct seq_file *m, loff_t *pos)
4229
{
4230 4231 4232
	struct ftrace_graph_data *fgd = m->private;

	if (*pos >= *fgd->count)
4233
		return NULL;
4234
	return &fgd->table[*pos];
4235
}
4236

4237 4238 4239 4240 4241
static void *
g_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __g_next(m, pos);
4242 4243 4244 4245
}

static void *g_start(struct seq_file *m, loff_t *pos)
{
4246 4247
	struct ftrace_graph_data *fgd = m->private;

4248 4249
	mutex_lock(&graph_lock);

4250
	/* Nothing, tell g_show to print all functions are enabled */
4251
	if (!*fgd->count && !*pos)
4252 4253
		return (void *)1;

4254
	return __g_next(m, pos);
4255 4256 4257 4258 4259 4260 4261 4262 4263 4264 4265 4266 4267 4268
}

static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}

static int g_show(struct seq_file *m, void *v)
{
	unsigned long *ptr = v;

	if (!ptr)
		return 0;

4269
	if (ptr == (unsigned long *)1) {
4270 4271 4272
		struct ftrace_graph_data *fgd = m->private;

		if (fgd->table == ftrace_graph_funcs)
4273
			seq_puts(m, "#### all functions enabled ####\n");
4274
		else
4275
			seq_puts(m, "#### no functions disabled ####\n");
4276 4277 4278
		return 0;
	}

4279
	seq_printf(m, "%ps\n", (void *)*ptr);
4280 4281 4282 4283

	return 0;
}

J
James Morris 已提交
4284
static const struct seq_operations ftrace_graph_seq_ops = {
4285 4286 4287 4288 4289 4290 4291
	.start = g_start,
	.next = g_next,
	.stop = g_stop,
	.show = g_show,
};

static int
4292 4293
__ftrace_graph_open(struct inode *inode, struct file *file,
		    struct ftrace_graph_data *fgd)
4294 4295 4296 4297 4298
{
	int ret = 0;

	mutex_lock(&graph_lock);
	if ((file->f_mode & FMODE_WRITE) &&
4299
	    (file->f_flags & O_TRUNC)) {
4300 4301
		*fgd->count = 0;
		memset(fgd->table, 0, fgd->size * sizeof(*fgd->table));
4302
	}
4303
	mutex_unlock(&graph_lock);
4304

4305 4306 4307 4308 4309 4310 4311 4312
	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, fgd->seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = fgd;
		}
	} else
		file->private_data = fgd;
4313 4314 4315 4316

	return ret;
}

4317 4318 4319 4320 4321 4322 4323 4324 4325 4326 4327 4328 4329 4330 4331 4332 4333 4334 4335 4336
static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
	struct ftrace_graph_data *fgd;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
	if (fgd == NULL)
		return -ENOMEM;

	fgd->table = ftrace_graph_funcs;
	fgd->size = FTRACE_GRAPH_MAX_FUNCS;
	fgd->count = &ftrace_graph_count;
	fgd->seq_ops = &ftrace_graph_seq_ops;

	return __ftrace_graph_open(inode, file, fgd);
}

4337 4338 4339 4340 4341 4342 4343 4344 4345 4346 4347 4348 4349 4350 4351 4352 4353 4354 4355 4356
static int
ftrace_graph_notrace_open(struct inode *inode, struct file *file)
{
	struct ftrace_graph_data *fgd;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
	if (fgd == NULL)
		return -ENOMEM;

	fgd->table = ftrace_graph_notrace_funcs;
	fgd->size = FTRACE_GRAPH_MAX_FUNCS;
	fgd->count = &ftrace_graph_notrace_count;
	fgd->seq_ops = &ftrace_graph_seq_ops;

	return __ftrace_graph_open(inode, file, fgd);
}

4357 4358 4359
static int
ftrace_graph_release(struct inode *inode, struct file *file)
{
4360 4361 4362 4363
	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;

		kfree(m->private);
4364
		seq_release(inode, file);
4365 4366 4367 4368
	} else {
		kfree(file->private_data);
	}

4369 4370 4371
	return 0;
}

4372
static int
4373
ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
4374 4375 4376
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
4377
	int search_len;
4378
	int fail = 1;
4379 4380 4381 4382
	int type, not;
	char *search;
	bool exists;
	int i;
4383

4384
	/* decode regex */
4385
	type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
4386
	if (!not && *idx >= size)
4387
		return -EBUSY;
4388 4389 4390

	search_len = strlen(search);

4391
	mutex_lock(&ftrace_lock);
4392 4393 4394 4395 4396 4397

	if (unlikely(ftrace_disabled)) {
		mutex_unlock(&ftrace_lock);
		return -ENODEV;
	}

4398 4399
	do_for_each_ftrace_rec(pg, rec) {

4400
		if (ftrace_match_record(rec, NULL, search, search_len, type)) {
4401
			/* if it is in the array */
4402
			exists = false;
4403
			for (i = 0; i < *idx; i++) {
4404 4405
				if (array[i] == rec->ip) {
					exists = true;
4406 4407
					break;
				}
4408 4409 4410 4411 4412 4413
			}

			if (!not) {
				fail = 0;
				if (!exists) {
					array[(*idx)++] = rec->ip;
4414
					if (*idx >= size)
4415 4416 4417 4418 4419 4420 4421 4422 4423
						goto out;
				}
			} else {
				if (exists) {
					array[i] = array[--(*idx)];
					array[*idx] = 0;
					fail = 0;
				}
			}
4424
		}
4425
	} while_for_each_ftrace_rec();
4426
out:
4427
	mutex_unlock(&ftrace_lock);
4428

4429 4430 4431 4432
	if (fail)
		return -EINVAL;

	return 0;
4433 4434 4435 4436 4437 4438
}

static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
4439
	struct trace_parser parser;
4440
	ssize_t read, ret = 0;
4441
	struct ftrace_graph_data *fgd = file->private_data;
4442

4443
	if (!cnt)
4444 4445
		return 0;

4446 4447
	if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX))
		return -ENOMEM;
4448

4449
	read = trace_get_user(&parser, ubuf, cnt, ppos);
4450

4451
	if (read >= 0 && trace_parser_loaded((&parser))) {
4452 4453
		parser.buffer[parser.idx] = 0;

4454 4455
		mutex_lock(&graph_lock);

4456
		/* we allow only one expression at a time */
4457 4458
		ret = ftrace_set_func(fgd->table, fgd->count, fgd->size,
				      parser.buffer);
4459 4460

		mutex_unlock(&graph_lock);
4461 4462
	}

4463 4464
	if (!ret)
		ret = read;
4465

4466
	trace_parser_put(&parser);
4467 4468 4469 4470 4471

	return ret;
}

static const struct file_operations ftrace_graph_fops = {
4472 4473 4474
	.open		= ftrace_graph_open,
	.read		= seq_read,
	.write		= ftrace_graph_write,
4475
	.llseek		= tracing_lseek,
4476
	.release	= ftrace_graph_release,
4477
};
4478 4479 4480 4481 4482

static const struct file_operations ftrace_graph_notrace_fops = {
	.open		= ftrace_graph_notrace_open,
	.read		= seq_read,
	.write		= ftrace_graph_write,
4483
	.llseek		= tracing_lseek,
4484 4485
	.release	= ftrace_graph_release,
};
4486 4487
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

4488 4489 4490 4491 4492 4493 4494 4495 4496 4497 4498 4499 4500 4501 4502 4503 4504 4505 4506 4507 4508 4509 4510 4511 4512 4513 4514 4515 4516 4517
void ftrace_create_filter_files(struct ftrace_ops *ops,
				struct dentry *parent)
{

	trace_create_file("set_ftrace_filter", 0644, parent,
			  ops, &ftrace_filter_fops);

	trace_create_file("set_ftrace_notrace", 0644, parent,
			  ops, &ftrace_notrace_fops);
}

/*
 * The name "destroy_filter_files" is really a misnomer. Although
 * in the future, it may actualy delete the files, but this is
 * really intended to make sure the ops passed in are disabled
 * and that when this function returns, the caller is free to
 * free the ops.
 *
 * The "destroy" name is only to match the "create" name that this
 * should be paired with.
 */
void ftrace_destroy_filter_files(struct ftrace_ops *ops)
{
	mutex_lock(&ftrace_lock);
	if (ops->flags & FTRACE_OPS_FL_ENABLED)
		ftrace_shutdown(ops, 0);
	ops->flags |= FTRACE_OPS_FL_DELETED;
	mutex_unlock(&ftrace_lock);
}

4518
static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
4519 4520
{

4521 4522
	trace_create_file("available_filter_functions", 0444,
			d_tracer, NULL, &ftrace_avail_fops);
4523

4524 4525 4526
	trace_create_file("enabled_functions", 0444,
			d_tracer, NULL, &ftrace_enabled_fops);

4527
	ftrace_create_filter_files(&global_ops, d_tracer);
4528

4529
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4530
	trace_create_file("set_graph_function", 0444, d_tracer,
4531 4532
				    NULL,
				    &ftrace_graph_fops);
4533 4534 4535
	trace_create_file("set_graph_notrace", 0444, d_tracer,
				    NULL,
				    &ftrace_graph_notrace_fops);
4536 4537
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

4538 4539 4540
	return 0;
}

4541
static int ftrace_cmp_ips(const void *a, const void *b)
4542
{
4543 4544
	const unsigned long *ipa = a;
	const unsigned long *ipb = b;
4545

4546 4547 4548 4549 4550 4551 4552 4553 4554 4555 4556 4557 4558 4559 4560 4561
	if (*ipa > *ipb)
		return 1;
	if (*ipa < *ipb)
		return -1;
	return 0;
}

static void ftrace_swap_ips(void *a, void *b, int size)
{
	unsigned long *ipa = a;
	unsigned long *ipb = b;
	unsigned long t;

	t = *ipa;
	*ipa = *ipb;
	*ipb = t;
4562 4563
}

4564
static int ftrace_process_locs(struct module *mod,
4565
			       unsigned long *start,
4566 4567
			       unsigned long *end)
{
4568
	struct ftrace_page *start_pg;
4569
	struct ftrace_page *pg;
4570
	struct dyn_ftrace *rec;
4571
	unsigned long count;
4572 4573
	unsigned long *p;
	unsigned long addr;
4574
	unsigned long flags = 0; /* Shut up gcc */
4575 4576 4577 4578 4579 4580 4581
	int ret = -ENOMEM;

	count = end - start;

	if (!count)
		return 0;

4582 4583 4584
	sort(start, count, sizeof(*start),
	     ftrace_cmp_ips, ftrace_swap_ips);

4585 4586
	start_pg = ftrace_allocate_pages(count);
	if (!start_pg)
4587
		return -ENOMEM;
4588

S
Steven Rostedt 已提交
4589
	mutex_lock(&ftrace_lock);
4590

4591 4592 4593 4594 4595
	/*
	 * Core and each module needs their own pages, as
	 * modules will free them when they are removed.
	 * Force a new page to be allocated for modules.
	 */
4596 4597 4598
	if (!mod) {
		WARN_ON(ftrace_pages || ftrace_pages_start);
		/* First initialization */
4599
		ftrace_pages = ftrace_pages_start = start_pg;
4600
	} else {
4601
		if (!ftrace_pages)
4602
			goto out;
4603

4604 4605 4606 4607
		if (WARN_ON(ftrace_pages->next)) {
			/* Hmm, we have free pages? */
			while (ftrace_pages->next)
				ftrace_pages = ftrace_pages->next;
4608
		}
4609

4610
		ftrace_pages->next = start_pg;
4611 4612
	}

4613
	p = start;
4614
	pg = start_pg;
4615 4616
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
4617 4618 4619 4620 4621 4622 4623 4624
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
4625 4626 4627 4628 4629 4630 4631 4632 4633 4634

		if (pg->index == pg->size) {
			/* We should have allocated enough */
			if (WARN_ON(!pg->next))
				break;
			pg = pg->next;
		}

		rec = &pg->records[pg->index++];
		rec->ip = addr;
4635 4636
	}

4637 4638 4639 4640 4641 4642
	/* We should have used all pages */
	WARN_ON(pg->next);

	/* Assign the last page to ftrace_pages */
	ftrace_pages = pg;

4643
	/*
4644 4645 4646 4647 4648 4649
	 * We only need to disable interrupts on start up
	 * because we are modifying code that an interrupt
	 * may execute, and the modification is not atomic.
	 * But for modules, nothing runs the code we modify
	 * until we are finished with it, and there's no
	 * reason to cause large interrupt latencies while we do it.
4650
	 */
4651 4652
	if (!mod)
		local_irq_save(flags);
4653
	ftrace_update_code(mod, start_pg);
4654 4655
	if (!mod)
		local_irq_restore(flags);
4656 4657
	ret = 0;
 out:
S
Steven Rostedt 已提交
4658
	mutex_unlock(&ftrace_lock);
4659

4660
	return ret;
4661 4662
}

4663
#ifdef CONFIG_MODULES
4664 4665 4666

#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)

4667
void ftrace_release_mod(struct module *mod)
4668 4669
{
	struct dyn_ftrace *rec;
4670
	struct ftrace_page **last_pg;
4671
	struct ftrace_page *pg;
4672
	int order;
4673

4674 4675
	mutex_lock(&ftrace_lock);

4676
	if (ftrace_disabled)
4677
		goto out_unlock;
4678

4679 4680 4681 4682 4683 4684 4685
	/*
	 * Each module has its own ftrace_pages, remove
	 * them from the list.
	 */
	last_pg = &ftrace_pages_start;
	for (pg = ftrace_pages_start; pg; pg = *last_pg) {
		rec = &pg->records[0];
4686
		if (within_module_core(rec->ip, mod)) {
4687
			/*
4688 4689
			 * As core pages are first, the first
			 * page should never be a module page.
4690
			 */
4691 4692 4693 4694 4695 4696 4697 4698
			if (WARN_ON(pg == ftrace_pages_start))
				goto out_unlock;

			/* Check if we are deleting the last page */
			if (pg == ftrace_pages)
				ftrace_pages = next_to_ftrace_page(last_pg);

			*last_pg = pg->next;
4699 4700 4701
			order = get_count_order(pg->size / ENTRIES_PER_PAGE);
			free_pages((unsigned long)pg->records, order);
			kfree(pg);
4702 4703 4704
		} else
			last_pg = &pg->next;
	}
4705
 out_unlock:
4706 4707 4708 4709 4710
	mutex_unlock(&ftrace_lock);
}

static void ftrace_init_module(struct module *mod,
			       unsigned long *start, unsigned long *end)
4711
{
4712
	if (ftrace_disabled || start == end)
4713
		return;
4714
	ftrace_process_locs(mod, start, end);
4715 4716
}

4717
void ftrace_module_init(struct module *mod)
4718
{
4719 4720 4721
	ftrace_init_module(mod, mod->ftrace_callsites,
			   mod->ftrace_callsites +
			   mod->num_ftrace_callsites);
4722 4723 4724 4725 4726 4727 4728 4729
}

static int ftrace_module_notify_exit(struct notifier_block *self,
				     unsigned long val, void *data)
{
	struct module *mod = data;

	if (val == MODULE_STATE_GOING)
4730
		ftrace_release_mod(mod);
4731 4732 4733 4734

	return 0;
}
#else
4735 4736
static int ftrace_module_notify_exit(struct notifier_block *self,
				     unsigned long val, void *data)
4737 4738 4739 4740 4741
{
	return 0;
}
#endif /* CONFIG_MODULES */

4742 4743 4744 4745 4746
struct notifier_block ftrace_module_exit_nb = {
	.notifier_call = ftrace_module_notify_exit,
	.priority = INT_MIN,	/* Run after anything that can remove kprobes */
};

4747 4748
void __init ftrace_init(void)
{
4749 4750
	extern unsigned long __start_mcount_loc[];
	extern unsigned long __stop_mcount_loc[];
4751
	unsigned long count, flags;
4752 4753 4754
	int ret;

	local_irq_save(flags);
4755
	ret = ftrace_dyn_arch_init();
4756
	local_irq_restore(flags);
4757
	if (ret)
4758 4759 4760
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;
4761 4762
	if (!count) {
		pr_info("ftrace: No functions to be traced?\n");
4763
		goto failed;
4764 4765 4766 4767
	}

	pr_info("ftrace: allocating %ld entries in %ld pages\n",
		count, count / ENTRIES_PER_PAGE + 1);
4768 4769 4770

	last_ftrace_enabled = ftrace_enabled = 1;

4771
	ret = ftrace_process_locs(NULL,
4772
				  __start_mcount_loc,
4773 4774
				  __stop_mcount_loc);

4775
	ret = register_module_notifier(&ftrace_module_exit_nb);
4776
	if (ret)
4777
		pr_warning("Failed to register trace ftrace module exit notifier\n");
4778

4779 4780
	set_ftrace_early_filters();

4781 4782 4783 4784 4785
	return;
 failed:
	ftrace_disabled = 1;
}

4786 4787 4788 4789 4790 4791 4792
/* Do nothing if arch does not support this */
void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
{
}

static void ftrace_update_trampoline(struct ftrace_ops *ops)
{
4793 4794 4795 4796 4797 4798 4799 4800 4801 4802 4803

/*
 * Currently there's no safe way to free a trampoline when the kernel
 * is configured with PREEMPT. That is because a task could be preempted
 * when it jumped to the trampoline, it may be preempted for a long time
 * depending on the system load, and currently there's no way to know
 * when it will be off the trampoline. If the trampoline is freed
 * too early, when the task runs again, it will be executing on freed
 * memory and crash.
 */
#ifdef CONFIG_PREEMPT
4804 4805 4806
	/* Currently, only non dynamic ops can have a trampoline */
	if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
		return;
4807
#endif
4808 4809 4810 4811

	arch_ftrace_update_trampoline(ops);
}

4812
#else
4813

4814
static struct ftrace_ops global_ops = {
4815
	.func			= ftrace_stub,
4816
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4817 4818
};

4819 4820 4821 4822 4823
static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
4824
core_initcall(ftrace_nodyn_init);
4825

4826 4827
static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
4828
static inline void ftrace_startup_all(int command) { }
4829
/* Keep as macros so we do not need to define the commands */
4830 4831 4832 4833 4834 4835
# define ftrace_startup(ops, command)					\
	({								\
		int ___ret = __register_ftrace_function(ops);		\
		if (!___ret)						\
			(ops)->flags |= FTRACE_OPS_FL_ENABLED;		\
		___ret;							\
4836
	})
4837 4838 4839 4840 4841 4842 4843
# define ftrace_shutdown(ops, command)					\
	({								\
		int ___ret = __unregister_ftrace_function(ops);		\
		if (!___ret)						\
			(ops)->flags &= ~FTRACE_OPS_FL_ENABLED;		\
		___ret;							\
	})
4844

I
Ingo Molnar 已提交
4845 4846
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
4847 4848

static inline int
4849
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
4850 4851 4852 4853
{
	return 1;
}

4854 4855 4856 4857
static void ftrace_update_trampoline(struct ftrace_ops *ops)
{
}

4858 4859
#endif /* CONFIG_DYNAMIC_FTRACE */

4860 4861 4862 4863 4864 4865 4866 4867 4868 4869 4870 4871 4872 4873 4874 4875 4876 4877 4878 4879 4880 4881 4882 4883 4884 4885 4886 4887
__init void ftrace_init_global_array_ops(struct trace_array *tr)
{
	tr->ops = &global_ops;
	tr->ops->private = tr;
}

void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
{
	/* If we filter on pids, update to use the pid function */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
		if (WARN_ON(tr->ops->func != ftrace_stub))
			printk("ftrace ops had %pS for function\n",
			       tr->ops->func);
		/* Only the top level instance does pid tracing */
		if (!list_empty(&ftrace_pids)) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}
	}
	tr->ops->func = func;
	tr->ops->private = tr;
}

void ftrace_reset_array_ops(struct trace_array *tr)
{
	tr->ops->func = ftrace_stub;
}

4888
static void
4889
ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
4890
			struct ftrace_ops *op, struct pt_regs *regs)
4891 4892 4893 4894 4895 4896 4897 4898 4899 4900
{
	if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
		return;

	/*
	 * Some of the ops may be dynamically allocated,
	 * they must be freed after a synchronize_sched().
	 */
	preempt_disable_notrace();
	trace_recursion_set(TRACE_CONTROL_BIT);
4901 4902 4903 4904 4905 4906 4907 4908

	/*
	 * Control funcs (perf) uses RCU. Only trace if
	 * RCU is currently active.
	 */
	if (!rcu_is_watching())
		goto out;

4909
	do_for_each_ftrace_op(op, ftrace_control_list) {
4910 4911
		if (!(op->flags & FTRACE_OPS_FL_STUB) &&
		    !ftrace_function_local_disabled(op) &&
4912
		    ftrace_ops_test(op, ip, regs))
4913
			op->func(ip, parent_ip, op, regs);
4914
	} while_for_each_ftrace_op(op);
4915
 out:
4916 4917 4918 4919 4920
	trace_recursion_clear(TRACE_CONTROL_BIT);
	preempt_enable_notrace();
}

static struct ftrace_ops control_ops = {
4921 4922
	.func	= ftrace_ops_control_func,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4923
	INIT_OPS_HASH(control_ops)
4924 4925
};

4926 4927
static inline void
__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4928
		       struct ftrace_ops *ignored, struct pt_regs *regs)
4929
{
4930
	struct ftrace_ops *op;
4931
	int bit;
4932

4933 4934 4935
	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
	if (bit < 0)
		return;
4936

4937 4938 4939 4940 4941
	/*
	 * Some of the ops may be dynamically allocated,
	 * they must be freed after a synchronize_sched().
	 */
	preempt_disable_notrace();
4942
	do_for_each_ftrace_op(op, ftrace_ops_list) {
4943
		if (ftrace_ops_test(op, ip, regs)) {
4944 4945
			if (FTRACE_WARN_ON(!op->func)) {
				pr_warn("op=%p %pS\n", op, op);
4946 4947
				goto out;
			}
4948
			op->func(ip, parent_ip, op, regs);
4949
		}
4950
	} while_for_each_ftrace_op(op);
4951
out:
4952
	preempt_enable_notrace();
4953
	trace_clear_recursion(bit);
4954 4955
}

4956 4957 4958 4959 4960
/*
 * Some archs only support passing ip and parent_ip. Even though
 * the list function ignores the op parameter, we do not want any
 * C side effects, where a function is called without the caller
 * sending a third parameter.
4961 4962 4963
 * Archs are to support both the regs and ftrace_ops at the same time.
 * If they support ftrace_ops, it is assumed they support regs.
 * If call backs want to use regs, they must either check for regs
4964 4965
 * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
4966 4967
 * An architecture can pass partial regs with ftrace_ops and still
 * set the ARCH_SUPPORT_FTARCE_OPS.
4968 4969 4970
 */
#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4971
				 struct ftrace_ops *op, struct pt_regs *regs)
4972
{
4973
	__ftrace_ops_list_func(ip, parent_ip, NULL, regs);
4974 4975 4976 4977
}
#else
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
{
4978
	__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
4979 4980 4981
}
#endif

4982 4983 4984 4985 4986 4987 4988 4989 4990 4991 4992 4993 4994 4995 4996 4997 4998 4999 5000
/*
 * If there's only one function registered but it does not support
 * recursion, this function will be called by the mcount trampoline.
 * This function will handle recursion protection.
 */
static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
				   struct ftrace_ops *op, struct pt_regs *regs)
{
	int bit;

	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
	if (bit < 0)
		return;

	op->func(ip, parent_ip, op, regs);

	trace_clear_recursion(bit);
}

5001 5002 5003 5004 5005 5006 5007 5008 5009 5010 5011 5012 5013 5014 5015 5016 5017 5018 5019 5020 5021 5022 5023 5024 5025 5026 5027 5028 5029 5030 5031
/**
 * ftrace_ops_get_func - get the function a trampoline should call
 * @ops: the ops to get the function for
 *
 * Normally the mcount trampoline will call the ops->func, but there
 * are times that it should not. For example, if the ops does not
 * have its own recursion protection, then it should call the
 * ftrace_ops_recurs_func() instead.
 *
 * Returns the function that the trampoline should call for @ops.
 */
ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
{
	/*
	 * If this is a dynamic ops or we force list func,
	 * then it needs to call the list anyway.
	 */
	if (ops->flags & FTRACE_OPS_FL_DYNAMIC || FTRACE_FORCE_LIST_FUNC)
		return ftrace_ops_list_func;

	/*
	 * If the func handles its own recursion, call it directly.
	 * Otherwise call the recursion protected function that
	 * will call the ftrace ops function.
	 */
	if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE))
		return ftrace_ops_recurs_func;

	return ops->func;
}

5032
static void clear_ftrace_swapper(void)
S
Steven Rostedt 已提交
5033 5034
{
	struct task_struct *p;
5035
	int cpu;
S
Steven Rostedt 已提交
5036

5037 5038 5039
	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
S
Steven Rostedt 已提交
5040
		clear_tsk_trace_trace(p);
5041 5042 5043
	}
	put_online_cpus();
}
S
Steven Rostedt 已提交
5044

5045 5046 5047 5048 5049 5050 5051 5052 5053 5054 5055
static void set_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		set_tsk_trace_trace(p);
	}
	put_online_cpus();
S
Steven Rostedt 已提交
5056 5057
}

5058 5059 5060 5061
static void clear_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

5062
	rcu_read_lock();
5063 5064 5065
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		clear_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
5066 5067
	rcu_read_unlock();

5068 5069 5070 5071
	put_pid(pid);
}

static void set_ftrace_pid(struct pid *pid)
S
Steven Rostedt 已提交
5072 5073 5074
{
	struct task_struct *p;

5075
	rcu_read_lock();
S
Steven Rostedt 已提交
5076 5077 5078
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		set_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
5079
	rcu_read_unlock();
S
Steven Rostedt 已提交
5080 5081
}

5082
static void clear_ftrace_pid_task(struct pid *pid)
5083
{
5084
	if (pid == ftrace_swapper_pid)
5085 5086
		clear_ftrace_swapper();
	else
5087
		clear_ftrace_pid(pid);
5088 5089 5090 5091 5092 5093 5094 5095 5096 5097
}

static void set_ftrace_pid_task(struct pid *pid)
{
	if (pid == ftrace_swapper_pid)
		set_ftrace_swapper();
	else
		set_ftrace_pid(pid);
}

5098
static int ftrace_pid_add(int p)
5099
{
S
Steven Rostedt 已提交
5100
	struct pid *pid;
5101 5102
	struct ftrace_pid *fpid;
	int ret = -EINVAL;
5103

5104
	mutex_lock(&ftrace_lock);
5105

5106 5107 5108 5109
	if (!p)
		pid = ftrace_swapper_pid;
	else
		pid = find_get_pid(p);
5110

5111 5112
	if (!pid)
		goto out;
5113

5114
	ret = 0;
5115

5116 5117 5118
	list_for_each_entry(fpid, &ftrace_pids, list)
		if (fpid->pid == pid)
			goto out_put;
S
Steven Rostedt 已提交
5119

5120
	ret = -ENOMEM;
5121

5122 5123 5124
	fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
	if (!fpid)
		goto out_put;
5125

5126 5127
	list_add(&fpid->list, &ftrace_pids);
	fpid->pid = pid;
5128

5129
	set_ftrace_pid_task(pid);
S
Steven Rostedt 已提交
5130

5131
	ftrace_update_pid_func();
5132 5133

	ftrace_startup_all(0);
5134 5135 5136 5137 5138 5139 5140

	mutex_unlock(&ftrace_lock);
	return 0;

out_put:
	if (pid != ftrace_swapper_pid)
		put_pid(pid);
S
Steven Rostedt 已提交
5141

5142 5143 5144 5145 5146 5147 5148 5149
out:
	mutex_unlock(&ftrace_lock);
	return ret;
}

static void ftrace_pid_reset(void)
{
	struct ftrace_pid *fpid, *safe;
S
Steven Rostedt 已提交
5150

5151 5152 5153 5154 5155 5156 5157 5158
	mutex_lock(&ftrace_lock);
	list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
		struct pid *pid = fpid->pid;

		clear_ftrace_pid_task(pid);

		list_del(&fpid->list);
		kfree(fpid);
5159 5160 5161
	}

	ftrace_update_pid_func();
5162
	ftrace_startup_all(0);
5163

S
Steven Rostedt 已提交
5164
	mutex_unlock(&ftrace_lock);
5165
}
5166

5167 5168 5169 5170 5171 5172 5173 5174 5175 5176 5177 5178 5179 5180 5181 5182 5183 5184 5185 5186 5187 5188 5189 5190 5191 5192 5193 5194
static void *fpid_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&ftrace_lock);

	if (list_empty(&ftrace_pids) && (!*pos))
		return (void *) 1;

	return seq_list_start(&ftrace_pids, *pos);
}

static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
{
	if (v == (void *)1)
		return NULL;

	return seq_list_next(v, &ftrace_pids, pos);
}

static void fpid_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&ftrace_lock);
}

static int fpid_show(struct seq_file *m, void *v)
{
	const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);

	if (v == (void *)1) {
5195
		seq_puts(m, "no pid\n");
5196 5197 5198 5199
		return 0;
	}

	if (fpid->pid == ftrace_swapper_pid)
5200
		seq_puts(m, "swapper tasks\n");
5201 5202 5203 5204 5205 5206 5207 5208 5209 5210 5211 5212 5213 5214 5215 5216 5217 5218 5219 5220 5221 5222 5223 5224 5225 5226 5227 5228
	else
		seq_printf(m, "%u\n", pid_vnr(fpid->pid));

	return 0;
}

static const struct seq_operations ftrace_pid_sops = {
	.start = fpid_start,
	.next = fpid_next,
	.stop = fpid_stop,
	.show = fpid_show,
};

static int
ftrace_pid_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_pid_reset();

	if (file->f_mode & FMODE_READ)
		ret = seq_open(file, &ftrace_pid_sops);

	return ret;
}

5229 5230 5231 5232
static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
5233
	char buf[64], *tmp;
5234 5235 5236 5237 5238 5239 5240 5241 5242 5243 5244
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

5245 5246 5247 5248
	/*
	 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
	 * to clean the filter quietly.
	 */
5249 5250
	tmp = strstrip(buf);
	if (strlen(tmp) == 0)
5251 5252
		return 1;

5253
	ret = kstrtol(tmp, 10, &val);
5254 5255 5256
	if (ret < 0)
		return ret;

5257
	ret = ftrace_pid_add(val);
5258

5259 5260
	return ret ? ret : cnt;
}
5261

5262 5263 5264 5265 5266
static int
ftrace_pid_release(struct inode *inode, struct file *file)
{
	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);
5267

5268
	return 0;
5269 5270
}

5271
static const struct file_operations ftrace_pid_fops = {
5272 5273 5274
	.open		= ftrace_pid_open,
	.write		= ftrace_pid_write,
	.read		= seq_read,
5275
	.llseek		= tracing_lseek,
5276
	.release	= ftrace_pid_release,
5277 5278 5279 5280 5281 5282 5283 5284 5285 5286 5287 5288
};

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	ftrace_init_dyn_debugfs(d_tracer);

5289 5290
	trace_create_file("set_ftrace_pid", 0644, d_tracer,
			    NULL, &ftrace_pid_fops);
5291 5292 5293

	ftrace_profile_debugfs(d_tracer);

5294 5295 5296 5297
	return 0;
}
fs_initcall(ftrace_init_debugfs);

S
Steven Rostedt 已提交
5298
/**
5299
 * ftrace_kill - kill ftrace
S
Steven Rostedt 已提交
5300 5301 5302 5303 5304
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
 */
5305
void ftrace_kill(void)
S
Steven Rostedt 已提交
5306 5307 5308 5309 5310 5311
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}

5312 5313 5314 5315 5316 5317 5318 5319
/**
 * Test if ftrace is dead or not.
 */
int ftrace_is_dead(void)
{
	return ftrace_disabled;
}

5320
/**
5321 5322
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
5323
 *
5324 5325 5326 5327 5328 5329
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
5330
 */
5331
int register_ftrace_function(struct ftrace_ops *ops)
5332
{
5333
	int ret = -1;
5334

5335 5336
	ftrace_ops_init(ops);

S
Steven Rostedt 已提交
5337
	mutex_lock(&ftrace_lock);
5338

5339
	ret = ftrace_startup(ops, 0);
5340

S
Steven Rostedt 已提交
5341
	mutex_unlock(&ftrace_lock);
5342

5343
	return ret;
5344
}
5345
EXPORT_SYMBOL_GPL(register_ftrace_function);
5346 5347

/**
5348
 * unregister_ftrace_function - unregister a function for profiling.
5349 5350 5351 5352 5353 5354 5355 5356
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

S
Steven Rostedt 已提交
5357
	mutex_lock(&ftrace_lock);
5358
	ret = ftrace_shutdown(ops, 0);
S
Steven Rostedt 已提交
5359
	mutex_unlock(&ftrace_lock);
5360 5361 5362

	return ret;
}
5363
EXPORT_SYMBOL_GPL(unregister_ftrace_function);
5364

I
Ingo Molnar 已提交
5365
int
5366
ftrace_enable_sysctl(struct ctl_table *table, int write,
5367
		     void __user *buffer, size_t *lenp,
5368 5369
		     loff_t *ppos)
{
5370
	int ret = -ENODEV;
5371

S
Steven Rostedt 已提交
5372
	mutex_lock(&ftrace_lock);
5373

5374 5375 5376 5377
	if (unlikely(ftrace_disabled))
		goto out;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
5378

5379
	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
5380 5381
		goto out;

5382
	last_ftrace_enabled = !!ftrace_enabled;
5383 5384 5385 5386 5387 5388

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
5389 5390
		if (ftrace_ops_list != &ftrace_list_end)
			update_ftrace_function();
5391 5392 5393 5394 5395 5396 5397 5398 5399

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
S
Steven Rostedt 已提交
5400
	mutex_unlock(&ftrace_lock);
5401
	return ret;
5402
}
5403

5404
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5405

5406 5407 5408 5409 5410 5411 5412
static struct ftrace_ops graph_ops = {
	.func			= ftrace_stub,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
				   FTRACE_OPS_FL_INITIALIZED |
				   FTRACE_OPS_FL_STUB,
#ifdef FTRACE_GRAPH_TRAMP_ADDR
	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
5413
	/* trampoline_size is only needed for dynamically allocated tramps */
5414 5415 5416 5417
#endif
	ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
};

5418
static int ftrace_graph_active;
5419

5420 5421 5422 5423 5424
int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

5425 5426 5427
/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
5428
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
5429
static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
5430 5431 5432 5433 5434 5435 5436 5437 5438 5439 5440 5441 5442 5443 5444 5445 5446 5447 5448 5449 5450 5451 5452 5453 5454 5455 5456 5457 5458 5459

/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
5460
			atomic_set(&t->tracing_graph_pause, 0);
5461
			atomic_set(&t->trace_overrun, 0);
5462 5463 5464 5465
			t->curr_ret_stack = -1;
			/* Make sure the tasks see the -1 first: */
			smp_wmb();
			t->ret_stack = ret_stack_list[start++];
5466 5467 5468 5469 5470 5471 5472 5473 5474 5475 5476
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}

5477
static void
5478 5479
ftrace_graph_probe_sched_switch(void *ignore,
			struct task_struct *prev, struct task_struct *next)
5480 5481 5482 5483
{
	unsigned long long timestamp;
	int index;

5484 5485 5486 5487 5488 5489 5490
	/*
	 * Does the user want to count the time a function was asleep.
	 * If so, do not update the time stamps.
	 */
	if (trace_flags & TRACE_ITER_SLEEP_TIME)
		return;

5491 5492 5493 5494 5495 5496 5497 5498 5499 5500 5501 5502 5503 5504 5505 5506 5507 5508
	timestamp = trace_clock_local();

	prev->ftrace_timestamp = timestamp;

	/* only process tasks that we timestamped */
	if (!next->ftrace_timestamp)
		return;

	/*
	 * Update all the counters in next to make up for the
	 * time next was sleeping.
	 */
	timestamp -= next->ftrace_timestamp;

	for (index = next->curr_ret_stack; index >= 0; index--)
		next->ret_stack[index].calltime += timestamp;
}

5509
/* Allocate a return stack for each task */
5510
static int start_graph_tracing(void)
5511 5512
{
	struct ftrace_ret_stack **ret_stack_list;
5513
	int ret, cpu;
5514 5515 5516 5517 5518 5519 5520 5521

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				sizeof(struct ftrace_ret_stack *),
				GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

5522
	/* The cpu_boot init_task->ret_stack will never be freed */
5523 5524
	for_each_online_cpu(cpu) {
		if (!idle_task(cpu)->ret_stack)
5525
			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
5526
	}
5527

5528 5529 5530 5531
	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

5532
	if (!ret) {
5533
		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
5534 5535 5536 5537 5538
		if (ret)
			pr_info("ftrace_graph: Couldn't activate tracepoint"
				" probe to kernel_sched_switch\n");
	}

5539 5540 5541 5542
	kfree(ret_stack_list);
	return ret;
}

5543 5544 5545 5546 5547 5548 5549 5550 5551 5552 5553 5554 5555 5556 5557 5558 5559 5560 5561 5562 5563
/*
 * Hibernation protection.
 * The state of the current task is too much unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
							void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}
	return NOTIFY_DONE;
}

5564 5565 5566 5567 5568 5569 5570 5571 5572 5573 5574 5575 5576 5577 5578 5579
static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
{
	if (!ftrace_ops_test(&global_ops, trace->func, NULL))
		return 0;
	return __ftrace_graph_entry(trace);
}

/*
 * The function graph tracer should only trace the functions defined
 * by set_ftrace_filter and set_ftrace_notrace. If another function
 * tracer ops is registered, the graph tracer requires testing the
 * function against the global ops, and not just trace any function
 * that any ftrace_ops registered.
 */
static void update_function_graph_func(void)
{
5580 5581 5582 5583 5584 5585 5586 5587 5588 5589 5590 5591 5592 5593 5594 5595 5596 5597 5598
	struct ftrace_ops *op;
	bool do_test = false;

	/*
	 * The graph and global ops share the same set of functions
	 * to test. If any other ops is on the list, then
	 * the graph tracing needs to test if its the function
	 * it should call.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op != &global_ops && op != &graph_ops &&
		    op != &ftrace_list_end) {
			do_test = true;
			/* in double loop, break out with goto */
			goto out;
		}
	} while_for_each_ftrace_op(op);
 out:
	if (do_test)
5599
		ftrace_graph_entry = ftrace_graph_entry_test;
5600 5601
	else
		ftrace_graph_entry = __ftrace_graph_entry;
5602 5603
}

5604 5605 5606 5607
static struct notifier_block ftrace_suspend_notifier = {
	.notifier_call = ftrace_suspend_notifier_call,
};

5608 5609
int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			trace_func_graph_ent_t entryfunc)
5610
{
5611 5612
	int ret = 0;

S
Steven Rostedt 已提交
5613
	mutex_lock(&ftrace_lock);
5614

5615
	/* we currently allow only one tracer registered at a time */
5616
	if (ftrace_graph_active) {
5617 5618 5619 5620
		ret = -EBUSY;
		goto out;
	}

5621 5622
	register_pm_notifier(&ftrace_suspend_notifier);

5623
	ftrace_graph_active++;
5624
	ret = start_graph_tracing();
5625
	if (ret) {
5626
		ftrace_graph_active--;
5627 5628
		goto out;
	}
5629

5630
	ftrace_graph_return = retfunc;
5631 5632 5633 5634 5635 5636 5637 5638 5639 5640

	/*
	 * Update the indirect function to the entryfunc, and the
	 * function that gets called to the entry_test first. Then
	 * call the update fgraph entry function to determine if
	 * the entryfunc should be called directly or not.
	 */
	__ftrace_graph_entry = entryfunc;
	ftrace_graph_entry = ftrace_graph_entry_test;
	update_function_graph_func();
5641

5642
	ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
5643
out:
S
Steven Rostedt 已提交
5644
	mutex_unlock(&ftrace_lock);
5645
	return ret;
5646 5647
}

5648
void unregister_ftrace_graph(void)
5649
{
S
Steven Rostedt 已提交
5650
	mutex_lock(&ftrace_lock);
5651

5652
	if (unlikely(!ftrace_graph_active))
5653 5654
		goto out;

5655
	ftrace_graph_active--;
5656
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
5657
	ftrace_graph_entry = ftrace_graph_entry_stub;
5658
	__ftrace_graph_entry = ftrace_graph_entry_stub;
5659
	ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
5660
	unregister_pm_notifier(&ftrace_suspend_notifier);
5661
	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
5662

5663 5664 5665 5666 5667 5668 5669 5670 5671 5672 5673
#ifdef CONFIG_DYNAMIC_FTRACE
	/*
	 * Function graph does not allocate the trampoline, but
	 * other global_ops do. We need to reset the ALLOC_TRAMP flag
	 * if one was used.
	 */
	global_ops.trampoline = save_global_trampoline;
	if (save_global_flags & FTRACE_OPS_FL_ALLOC_TRAMP)
		global_ops.flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
#endif

5674
 out:
S
Steven Rostedt 已提交
5675
	mutex_unlock(&ftrace_lock);
5676
}
5677

5678 5679 5680 5681 5682 5683 5684 5685
static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);

static void
graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
{
	atomic_set(&t->tracing_graph_pause, 0);
	atomic_set(&t->trace_overrun, 0);
	t->ftrace_timestamp = 0;
L
Lucas De Marchi 已提交
5686
	/* make curr_ret_stack visible before we add the ret_stack */
5687 5688 5689 5690 5691 5692 5693 5694 5695 5696 5697 5698 5699 5700 5701 5702 5703 5704 5705 5706 5707 5708 5709 5710 5711 5712 5713 5714 5715 5716 5717 5718 5719 5720
	smp_wmb();
	t->ret_stack = ret_stack;
}

/*
 * Allocate a return stack for the idle task. May be the first
 * time through, or it may be done by CPU hotplug online.
 */
void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
{
	t->curr_ret_stack = -1;
	/*
	 * The idle task has no parent, it either has its own
	 * stack or no stack at all.
	 */
	if (t->ret_stack)
		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = per_cpu(idle_ret_stack, cpu);
		if (!ret_stack) {
			ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
					    * sizeof(struct ftrace_ret_stack),
					    GFP_KERNEL);
			if (!ret_stack)
				return;
			per_cpu(idle_ret_stack, cpu) = ret_stack;
		}
		graph_init_task(t, ret_stack);
	}
}

5721
/* Allocate a return stack for newly created task */
5722
void ftrace_graph_init_task(struct task_struct *t)
5723
{
5724 5725
	/* Make sure we do not use the parent ret_stack */
	t->ret_stack = NULL;
5726
	t->curr_ret_stack = -1;
5727

5728
	if (ftrace_graph_active) {
5729 5730 5731
		struct ftrace_ret_stack *ret_stack;

		ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
5732 5733
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
5734
		if (!ret_stack)
5735
			return;
5736
		graph_init_task(t, ret_stack);
5737
	}
5738 5739
}

5740
void ftrace_graph_exit_task(struct task_struct *t)
5741
{
5742 5743
	struct ftrace_ret_stack	*ret_stack = t->ret_stack;

5744
	t->ret_stack = NULL;
5745 5746 5747 5748
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
5749
}
5750
#endif