/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/tracefs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>

#include <trace/events/sched.h>

#include <asm/setup.h>

#include "trace_output.h"
#include "trace_stat.h"

#define FTRACE_WARN_ON(cond)			\
	({					\
		int ___r = cond;		\
		if (WARN_ON(___r))		\
			ftrace_kill();		\
		___r;				\
	})

#define FTRACE_WARN_ON_ONCE(cond)		\
	({					\
		int ___r = cond;		\
		if (WARN_ON_ONCE(___r))		\
			ftrace_kill();		\
		___r;				\
	})
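/*
 * Both macros above are GNU statement expressions that evaluate to the
 * tested condition, so they can be used directly inside an if ().
 * A hypothetical illustration (not taken from this file):
 *
 *	if (FTRACE_WARN_ON_ONCE(!rec))
 *		return NULL;
 *
 * On a true condition they warn and also call ftrace_kill() to shut
 * the function tracer down, since continuing would be unsafe.
 */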

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_OPS_HASH(opsname)	\
	.func_hash		= &opsname.local_hash,			\
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#define ASSIGN_OPS_HASH(opsname, val) \
	.func_hash		= val, \
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define INIT_OPS_HASH(opsname)
#define ASSIGN_OPS_HASH(opsname, val)
#endif

static struct ftrace_ops ftrace_list_end __read_mostly = {
	.func		= ftrace_stub,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
	INIT_OPS_HASH(ftrace_list_end)
};

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* What to set function_trace_op to */
static struct ftrace_ops *set_function_trace_op;

static bool ftrace_pids_enabled(struct ftrace_ops *ops)
{
	struct trace_array *tr;

	if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
		return false;

	tr = ops->private;

	return tr->function_pids != NULL;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;

#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *regs);
#else
/* See comment below, where ftrace_ops_list_func is defined */
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
#endif

/*
 * Traverse the ftrace_global_list, invoking all entries.  The reason that we
 * can use rcu_dereference_raw_notrace() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism.  The rcu_dereference_raw_notrace() calls are needed to handle
 * concurrent insertions into the ftrace_global_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)			\
	op = rcu_dereference_raw_notrace(list);			\
	do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)				\
	while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&	\
	       unlikely((op) != &ftrace_list_end))
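
/*
 * A minimal usage sketch of the pair above (illustrative only; real
 * callers appear further down in this file). do_something() is a
 * placeholder, not a real function:
 *
 *	struct ftrace_ops *op;
 *
 *	do_for_each_ftrace_op(op, ftrace_ops_list) {
 *		if (op->flags & FTRACE_OPS_FL_ENABLED)
 *			do_something(op);
 *	} while_for_each_ftrace_op(op);
 *
 * The pair expands to a do/while loop, so the closing macro must
 * always be used.
 */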

static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
		mutex_init(&ops->local_hash.regex_lock);
		ops->func_hash = &ops->local_hash;
		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
	}
#endif
}

/**
 * ftrace_nr_registered_ops - return number of ops registered
 *
 * Returns the number of ftrace_ops registered and tracing functions
 */
int ftrace_nr_registered_ops(void)
{
	struct ftrace_ops *ops;
	int cnt = 0;

	mutex_lock(&ftrace_lock);

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next)
		cnt++;

	mutex_unlock(&ftrace_lock);

	return cnt;
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct pt_regs *regs)
{
	struct trace_array *tr = op->private;

	if (tr && this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid))
		return;

	op->saved_func(ip, parent_ip, op, regs);
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag before the change takes effect.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}

static void per_cpu_ops_disable_all(struct ftrace_ops *ops)
{
	int cpu;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(ops->disabled, cpu) = 1;
}

static int per_cpu_ops_alloc(struct ftrace_ops *ops)
{
	int __percpu *disabled;

	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
		return -EINVAL;

	disabled = alloc_percpu(int);
	if (!disabled)
		return -ENOMEM;

	ops->disabled = disabled;
	per_cpu_ops_disable_all(ops);
	return 0;
}

static void ftrace_sync(struct work_struct *work)
{
	/*
	 * This function is just a stub to implement a hard force
	 * of synchronize_sched(). This requires synchronizing
	 * tasks even in userspace and idle.
	 *
	 * Yes, function tracing is rude.
	 */
}

static void ftrace_sync_ipi(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static void update_function_graph_func(void);

/* Both enabled by default (can be cleared by function_graph tracer flags) */
static bool fgraph_sleep_time = true;
static bool fgraph_graph_time = true;

#else
static inline void update_function_graph_func(void) { }
#endif


static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
{
	/*
	 * If this is a dynamic, RCU, or per CPU ops, or we force list func,
	 * then it needs to call the list anyway.
	 */
	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU |
			  FTRACE_OPS_FL_RCU) || FTRACE_FORCE_LIST_FUNC)
		return ftrace_ops_list_func;

	return ftrace_ops_get_func(ops);
}

static void update_ftrace_function(void)
{
	ftrace_func_t func;

	/*
	 * Prepare the ftrace_ops that the arch callback will use.
	 * If there's only one ftrace_ops registered, the ftrace_ops_list
	 * will point to the ops we want.
	 */
	set_function_trace_op = ftrace_ops_list;

	/* If there's no ftrace_ops registered, just call the stub function */
	if (ftrace_ops_list == &ftrace_list_end) {
		func = ftrace_stub;

	/*
	 * If we are at the end of the list and this ops is
	 * recursion safe and not dynamic and the arch supports passing ops,
	 * then have the mcount trampoline call the function directly.
	 */
	} else if (ftrace_ops_list->next == &ftrace_list_end) {
		func = ftrace_ops_get_list_func(ftrace_ops_list);

	} else {
		/* Just use the default ftrace_ops */
		set_function_trace_op = &ftrace_list_end;
		func = ftrace_ops_list_func;
	}

	update_function_graph_func();

	/* If there's no change, then do nothing more here */
	if (ftrace_trace_function == func)
		return;

	/*
	 * If we are using the list function, it doesn't care
	 * about the function_trace_ops.
	 */
	if (func == ftrace_ops_list_func) {
		ftrace_trace_function = func;
		/*
		 * Don't even bother setting function_trace_ops,
		 * it would be racy to do so anyway.
		 */
		return;
	}

#ifndef CONFIG_DYNAMIC_FTRACE
	/*
	 * For static tracing, we need to be a bit more careful.
	 * The function change takes effect immediately. Thus,
	 * we need to coordinate the setting of the function_trace_ops
	 * with the setting of the ftrace_trace_function.
	 *
	 * Set the function to the list ops, which will call the
	 * function we want, albeit indirectly, but it handles the
	 * ftrace_ops and doesn't depend on function_trace_op.
	 */
	ftrace_trace_function = ftrace_ops_list_func;
	/*
	 * Make sure all CPUs see this. Yes this is slow, but static
	 * tracing is slow and nasty to have enabled.
	 */
	schedule_on_each_cpu(ftrace_sync);
	/* Now all cpus are using the list ops. */
	function_trace_op = set_function_trace_op;
	/* Make sure the function_trace_op is visible on all CPUs */
	smp_wmb();
	/* Nasty way to force a rmb on all cpus */
	smp_call_function(ftrace_sync_ipi, NULL, 1);
	/* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */

	ftrace_trace_function = func;
}

int using_ftrace_ops_list_func(void)
{
	return ftrace_trace_function == ftrace_ops_list_func;
}

static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	ops->next = *list;
	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}

static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (*list == ops && ops->next == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ops->flags & FTRACE_OPS_FL_DELETED)
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
	 */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif

	if (!core_kernel_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	if (ops->flags & FTRACE_OPS_FL_PER_CPU) {
		if (per_cpu_ops_alloc(ops))
			return -ENOMEM;
	}

	add_ftrace_ops(&ftrace_ops_list, ops);

	/* Always save the function, and reset at unregistering */
	ops->saved_func = ops->func;

	if (ftrace_pids_enabled(ops))
		ops->func = ftrace_pid_func;

	ftrace_update_trampoline(ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	ret = remove_ftrace_ops(&ftrace_ops_list, ops);

	if (ret < 0)
		return ret;

	if (ftrace_enabled)
		update_ftrace_function();

	ops->func = ops->saved_func;

	return 0;
}

static void ftrace_update_pid_func(void)
{
	struct ftrace_ops *op;

	/* Only do something if we are tracing something */
	if (ftrace_trace_function == ftrace_stub)
		return;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op->flags & FTRACE_OPS_FL_PID) {
			op->func = ftrace_pids_enabled(op) ?
				ftrace_pid_func : op->saved_func;
			ftrace_update_trampoline(op);
		}
	} while_for_each_ftrace_op(op);

	update_ftrace_function();
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
	unsigned long long		time_squared;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
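
/*
 * Back-of-the-envelope sizing (illustrative, assuming 4K pages and a
 * 64-bit kernel): a struct ftrace_profile is a few dozen bytes, so one
 * page holds on the order of a hundred records, and profiling ~20000
 * functions therefore costs a few hundred pages (roughly 1 MB) of
 * record storage per possible CPU.
 */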

static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)

static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}
#else
/* when not using function graph, compare against hit counts */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "  Function                               "
		 "Hit    Time            Avg             s^2\n"
		    "  --------                               "
		 "---    ----            ---             ---\n");
#else
	seq_puts(m, "  Function                               Hit\n"
		    "  --------                               ---\n");
#endif
	return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
#endif
	mutex_lock(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0)) {
		ret = -EBUSY;
		goto out;
	}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	avg = rec->time;
	do_div(avg, rec->counter);
	if (tracing_thresh && (avg < tracing_thresh))
		goto out;
#endif

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "    ");

	/* Sample standard deviation (s^2) */
	if (rec->counter <= 1)
		stddev = 0;
	else {
		/*
		 * Apply Welford's method:
		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
		 */
		stddev = rec->counter * rec->time_squared -
			 rec->time * rec->time;

		/*
		 * Divide only 1000 for ns^2 -> us^2 conversion.
		 * trace_print_graph_duration will divide 1000 again.
		 */
		do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
	}
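
	/*
	 * Worked example with made-up numbers: two calls taking 3us and
	 * 5us give time = 8000 ns and time_squared = 34,000,000 ns^2, so
	 * the dividend is 2 * 34,000,000 - 8000 * 8000 = 4,000,000. The
	 * do_div() above (by 2 * 1 * 1000) reduces that to 2000, and the
	 * extra divide-by-1000 in trace_print_graph_duration() displays
	 * 2, i.e. a sample variance of 2 us^2.
	 */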

	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(stddev, &s);
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');
out:
	mutex_unlock(&ftrace_profile_lock);

	return ret;
}

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist because
	 * dynamic tracing is what counts them. With past experience
	 * we have around 20K functions. That should be more than enough.
	 * It is highly unlikely we will execute every function in
	 * the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

	for (i = 1; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

 out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}

static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	unsigned long key;

	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated, this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}

static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
		      struct ftrace_ops *ops, struct pt_regs *regs)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	int index = trace->depth;

	function_profile_call(trace->func, 0, NULL, NULL);

	if (index >= 0 && index < FTRACE_RETFUNC_DEPTH)
		current->ret_stack[index].subtime = 0;

	return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	/* If the calltime was zero'd ignore it */
	if (!trace->calltime)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!fgraph_graph_time) {
		int index;

		index = trace->depth;

		/* Append this call time to the parent time to subtract */
		if (index)
			current->ret_stack[index - 1].subtime += calltime;

		if (current->ret_stack[index].subtime < calltime)
			calltime -= current->ret_stack[index].subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		rec->time_squared += calltime * calltime;
	}

 out:
	local_irq_restore(flags);
}

static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&profile_graph_return,
				     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph();
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
	.func		= function_profile_call,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
	INIT_OPS_HASH(ftrace_profile_ops)
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_sched.
			 */
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	*ppos += cnt;

	return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
	.llseek		= default_llseek,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};

static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	struct dentry *entry;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		name = kasprintf(GFP_KERNEL, "function%d", cpu);
		if (!name) {
			/*
			 * The files created are permanent, if something happens
			 * we still do not free memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	entry = tracefs_create_file("function_profile_enabled", 0644,
				    d_tracer, NULL, &ftrace_profile_fops);
	if (!entry)
		pr_warn("Could not create tracefs 'function_profile_enabled' entry\n");
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

static struct pid * const ftrace_swapper_pid = &init_struct_pid;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int ftrace_graph_active;
#else
# define ftrace_graph_active 0
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

static struct ftrace_ops *removed_ops;

/*
 * Set when doing a global update, like enabling all recs or disabling them.
 * It is not set when just updating a single ftrace_ops.
 */
static bool update_all_ops;

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
	struct hlist_node	node;
	struct ftrace_probe_ops	*ops;
	unsigned long		flags;
	unsigned long		ip;
	void			*data;
	struct list_head	free_list;
};

struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;
};

struct ftrace_hash {
	unsigned long		size_bits;
	struct hlist_head	*buckets;
	unsigned long		count;
	struct rcu_head		rcu;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
	.buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)

static struct ftrace_ops global_ops = {
	.func				= ftrace_stub,
	.local_hash.notrace_hash	= EMPTY_HASH,
	.local_hash.filter_hash		= EMPTY_HASH,
	INIT_OPS_HASH(global_ops)
	.flags				= FTRACE_OPS_FL_RECURSION_SAFE |
					  FTRACE_OPS_FL_INITIALIZED |
					  FTRACE_OPS_FL_PID,
};

/*
 * This is used by __kernel_text_address() to return true if the
 * address is on a dynamically allocated trampoline that would
 * not return true for either core_kernel_text() or
 * is_module_text_address().
 */
bool is_ftrace_trampoline(unsigned long addr)
{
	struct ftrace_ops *op;
	bool ret = false;

	/*
	 * Some of the ops may be dynamically allocated,
	 * they are freed after a synchronize_sched().
	 */
	preempt_disable_notrace();

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/*
		 * This is to check for dynamically allocated trampolines.
		 * Trampolines that are in kernel text will have
		 * core_kernel_text() return true.
		 */
		if (op->trampoline && op->trampoline_size)
			if (addr >= op->trampoline &&
			    addr < op->trampoline + op->trampoline_size) {
				ret = true;
				goto out;
			}
	} while_for_each_ftrace_op(op);

 out:
	preempt_enable_notrace();

	return ret;
}

struct ftrace_page {
	struct ftrace_page	*next;
	struct dyn_ftrace	*records;
	int			index;
	int			size;
};

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static bool __always_inline ftrace_hash_empty(struct ftrace_hash *hash)
{
	return !hash || !hash->count;
}

static struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	unsigned long key;
	struct ftrace_func_entry *entry;
	struct hlist_head *hhd;

	if (ftrace_hash_empty(hash))
		return NULL;

	if (hash->size_bits > 0)
		key = hash_long(ip, hash->size_bits);
	else
		key = 0;

	hhd = &hash->buckets[key];

	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
		if (entry->ip == ip)
			return entry;
	}
	return NULL;
}

static void __add_hash_entry(struct ftrace_hash *hash,
			     struct ftrace_func_entry *entry)
{
	struct hlist_head *hhd;
	unsigned long key;

	if (hash->size_bits)
		key = hash_long(entry->ip, hash->size_bits);
	else
		key = 0;

	hhd = &hash->buckets[key];
	hlist_add_head(&entry->hlist, hhd);
	hash->count++;
}

static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
	struct ftrace_func_entry *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->ip = ip;
	__add_hash_entry(hash, entry);

	return 0;
}

static void
free_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	kfree(entry);
	hash->count--;
}

static void
remove_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	hash->count--;
}

static void ftrace_hash_clear(struct ftrace_hash *hash)
{
	struct hlist_head *hhd;
	struct hlist_node *tn;
	struct ftrace_func_entry *entry;
	int size = 1 << hash->size_bits;
	int i;

	if (!hash->count)
		return;

	for (i = 0; i < size; i++) {
		hhd = &hash->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
			free_hash_entry(hash, entry);
	}
	FTRACE_WARN_ON(hash->count);
}

static void free_ftrace_hash(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	ftrace_hash_clear(hash);
	kfree(hash->buckets);
	kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
	struct ftrace_hash *hash;

	hash = container_of(rcu, struct ftrace_hash, rcu);
	free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
}

void ftrace_free_filter(struct ftrace_ops *ops)
{
	ftrace_ops_init(ops);
	free_ftrace_hash(ops->func_hash->filter_hash);
	free_ftrace_hash(ops->func_hash->notrace_hash);
}

static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
	struct ftrace_hash *hash;
	int size;

	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
	if (!hash)
		return NULL;

	size = 1 << size_bits;
	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);

	if (!hash->buckets) {
		kfree(hash);
		return NULL;
	}

	hash->size_bits = size_bits;

	return hash;
}

static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	int size;
	int ret;
	int i;

	new_hash = alloc_ftrace_hash(size_bits);
	if (!new_hash)
		return NULL;

	/* Empty hash? */
	if (ftrace_hash_empty(hash))
		return new_hash;

	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
			ret = add_hash_entry(new_hash, entry->ip);
			if (ret < 0)
				goto free_hash;
		}
	}

	FTRACE_WARN_ON(new_hash->count != hash->count);

	return new_hash;

 free_hash:
	free_ftrace_hash(new_hash);
	return NULL;
}

static void
ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
static void
ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash);

static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
		 struct ftrace_hash **dst, struct ftrace_hash *src)
{
	struct ftrace_func_entry *entry;
	struct hlist_node *tn;
	struct hlist_head *hhd;
	struct ftrace_hash *new_hash;
	int size = src->count;
	int bits = 0;
	int ret;
	int i;

	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
		return -EINVAL;

	/*
	 * If the new source is empty, just free dst and assign it
	 * the empty_hash.
	 */
	if (!src->count) {
		new_hash = EMPTY_HASH;
		goto update;
	}

	/*
	 * Make the hash size about 1/2 the # found
	 */
	for (size /= 2; size; size >>= 1)
		bits++;

	/* Don't allocate too much */
	if (bits > FTRACE_HASH_MAX_BITS)
		bits = FTRACE_HASH_MAX_BITS;

	new_hash = alloc_ftrace_hash(bits);
	if (!new_hash)
		return -ENOMEM;

	size = 1 << src->size_bits;
	for (i = 0; i < size; i++) {
		hhd = &src->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
			remove_hash_entry(src, entry);
			__add_hash_entry(new_hash, entry);
		}
	}

update:
	/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
	if (enable) {
		/* IPMODIFY should be updated only when filter_hash updating */
		ret = ftrace_hash_ipmodify_update(ops, new_hash);
		if (ret < 0) {
			free_ftrace_hash(new_hash);
			return ret;
		}
	}

	/*
	 * Remove the current set, update the hash and add
	 * them back.
	 */
	ftrace_hash_rec_disable_modify(ops, enable);

	rcu_assign_pointer(*dst, new_hash);

	ftrace_hash_rec_enable_modify(ops, enable);

	return 0;
}

static bool hash_contains_ip(unsigned long ip,
			     struct ftrace_ops_hash *hash)
{
	/*
	 * The function record is a match if it exists in the filter
	 * hash and not in the notrace hash. Note, an empty hash is
	 * considered a match for the filter hash, but an empty
	 * notrace hash is considered not in the notrace hash.
	 */
	return (ftrace_hash_empty(hash->filter_hash) ||
		ftrace_lookup_ip(hash->filter_hash, ip)) &&
		(ftrace_hash_empty(hash->notrace_hash) ||
		 !ftrace_lookup_ip(hash->notrace_hash, ip));
}
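
/*
 * Behaviour summary of hash_contains_ip() (illustrative): an ip "matches"
 * when both hashes are empty, when it is in a non-empty filter_hash and
 * not in notrace_hash, or when filter_hash is empty and it is not in
 * notrace_hash. It does not match when it is absent from a non-empty
 * filter_hash or present in notrace_hash.
 */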

/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu_sched().
 */
static int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
	struct ftrace_ops_hash hash;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * There's a small race when adding ops that the ftrace handler
	 * that wants regs, may be called without them. We can not
	 * allow that handler to be called if regs is NULL.
	 */
	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
		return 0;
#endif

	hash.filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
	hash.notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);

	if (hash_contains_ip(ip, &hash))
		ret = 1;
	else
		ret = 0;

	return ret;
}

/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}
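
/*
 * Since the pair above expands to two nested for loops, a plain 'break'
 * only leaves the inner loop. A sketch of the intended pattern (the ip
 * variable and the 'found' label are placeholders for illustration):
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->ip == ip)
 *			goto found;
 *	} while_for_each_ftrace_rec();
 *	return NULL;
 * found:
 *	...
 */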


static int ftrace_cmp_recs(const void *a, const void *b)
{
	const struct dyn_ftrace *key = a;
	const struct dyn_ftrace *rec = b;

	if (key->flags < rec->ip)
		return -1;
	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
		return 1;
	return 0;
}

/**
 * ftrace_location_range - return the first address of a traced location
 *	if it touches the given ip range
 * @start: start of range to search.
 * @end: end of range to search (inclusive). @end points to the last byte
 *	to check.
 *
 * Returns rec->ip if the related ftrace location is at least partly within
 * the given address range. That is, the first address of the instruction
 * that is either a NOP or call to the function tracer. It checks the ftrace
 * internal tables to determine if the address belongs or not.
 */
unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	struct dyn_ftrace key;

	key.ip = start;
	key.flags = end;	/* overload flags, as it is unsigned long */

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		if (end < pg->records[0].ip ||
		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
			continue;
		rec = bsearch(&key, pg->records, pg->index,
			      sizeof(struct dyn_ftrace),
			      ftrace_cmp_recs);
		if (rec)
			return rec->ip;
	}

	return 0;
}

/**
 * ftrace_location - return true if the ip giving is a traced location
 * @ip: the instruction pointer to check
 *
 * Returns rec->ip if @ip given is a pointer to a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
unsigned long ftrace_location(unsigned long ip)
{
	return ftrace_location_range(ip, ip);
}

/**
 * ftrace_text_reserved - return true if range contains an ftrace location
 * @start: start of range to search
 * @end: end of range to search (inclusive). @end points to the last byte to check.
 *
 * Returns 1 if @start and @end contains a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_text_reserved(const void *start, const void *end)
{
	unsigned long ret;

	ret = ftrace_location_range((unsigned long)start,
				    (unsigned long)end);

	return (int)!!ret;
}

/* Test if ops registered to this rec needs regs */
static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	bool keep_regs = false;

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next) {
		/* pass rec in as regs to have non-NULL val */
		if (ftrace_ops_test(ops, rec->ip, rec)) {
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				keep_regs = true;
				break;
			}
		}
	}

	return  keep_regs;
}

static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
				     int filter_hash,
				     bool inc)
{
	struct ftrace_hash *hash;
	struct ftrace_hash *other_hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	bool update = false;
	int count = 0;
	int all = 0;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return false;

	/*
	 * In the filter_hash case:
	 *   If the count is zero, we update all records.
	 *   Otherwise we just update the items in the hash.
	 *
	 * In the notrace_hash case:
	 *   We enable the update in the hash.
	 *   As disabling notrace means enabling the tracing,
	 *   and enabling notrace means disabling, the inc variable
	 *   gets inversed.
	 */
	if (filter_hash) {
		hash = ops->func_hash->filter_hash;
		other_hash = ops->func_hash->notrace_hash;
		if (ftrace_hash_empty(hash))
			all = 1;
	} else {
		inc = !inc;
		hash = ops->func_hash->notrace_hash;
		other_hash = ops->func_hash->filter_hash;
		/*
		 * If the notrace hash has no items,
		 * then there's nothing to do.
		 */
		if (ftrace_hash_empty(hash))
			return false;
	}

	do_for_each_ftrace_rec(pg, rec) {
		int in_other_hash = 0;
		int in_hash = 0;
		int match = 0;

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		if (all) {
			/*
			 * Only the filter_hash affects all records.
			 * Update if the record is not in the notrace hash.
			 */
			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
				match = 1;
		} else {
			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);

			/*
			 * If filter_hash is set, we want to match all functions
			 * that are in the hash but not in the other hash.
			 *
			 * If filter_hash is not set, then we are decrementing.
			 * That means we match anything that is in the hash
			 * and also in the other_hash. That is, we need to turn
			 * off functions in the other hash because they are disabled
			 * by this hash.
			 */
			if (filter_hash && in_hash && !in_other_hash)
				match = 1;
			else if (!filter_hash && in_hash &&
				 (in_other_hash || ftrace_hash_empty(other_hash)))
				match = 1;
		}
		if (!match)
			continue;

		if (inc) {
			rec->flags++;
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
				return false;

			/*
			 * If there's only a single callback registered to a
			 * function, and the ops has a trampoline registered
			 * for it, then we can call it directly.
			 */
			if (ftrace_rec_count(rec) == 1 && ops->trampoline)
				rec->flags |= FTRACE_FL_TRAMP;
			else
				/*
				 * If we are adding another function callback
				 * to this function, and the previous had a
				 * custom trampoline in use, then we need to go
				 * back to the default trampoline.
				 */
				rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * If any ops wants regs saved for this function
			 * then all ops will get saved regs.
			 */
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
				rec->flags |= FTRACE_FL_REGS;
		} else {
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
				return false;
			rec->flags--;

			/*
			 * If the rec had REGS enabled and the ops that is
			 * being removed had REGS set, then see if there is
			 * still any ops for this record that wants regs.
			 * If not, we can stop recording them.
			 */
			if (ftrace_rec_count(rec) > 0 &&
			    rec->flags & FTRACE_FL_REGS &&
			    ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				if (!test_rec_ops_needs_regs(rec))
					rec->flags &= ~FTRACE_FL_REGS;
			}

			/*
			 * If the rec had TRAMP enabled, then it needs to
			 * be cleared. As TRAMP can only be enabled iff
			 * there is only a single ops attached to it.
			 * In otherwords, always disable it on decrementing.
			 * In the future, we may set it if rec count is
			 * decremented to one, and the ops that is left
			 * has a trampoline.
			 */
			rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * flags will be cleared in ftrace_check_record()
			 * if rec count is zero.
			 */
		}
		count++;

		/* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
		update |= ftrace_test_record(rec, 1) != FTRACE_UPDATE_IGNORE;

		/* Shortcut, if we handled all records, we are done. */
		if (!all && count == hash->count)
			return update;
	} while_for_each_ftrace_rec();

	return update;
}

static bool ftrace_hash_rec_disable(struct ftrace_ops *ops,
				    int filter_hash)
{
	return __ftrace_hash_rec_update(ops, filter_hash, 0);
}

static bool ftrace_hash_rec_enable(struct ftrace_ops *ops,
				   int filter_hash)
{
	return __ftrace_hash_rec_update(ops, filter_hash, 1);
}

static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
					  int filter_hash, int inc)
{
	struct ftrace_ops *op;

	__ftrace_hash_rec_update(ops, filter_hash, inc);

	if (ops->func_hash != &global_ops.local_hash)
		return;

	/*
	 * If the ops shares the global_ops hash, then we need to update
	 * all ops that are enabled and use this hash.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* Already done */
		if (op == ops)
			continue;
		if (op->func_hash == &global_ops.local_hash)
			__ftrace_hash_rec_update(op, filter_hash, inc);
	} while_for_each_ftrace_op(op);
}

static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
					   int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
					  int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 1);
}

/*
 * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
 * or no-needed to update, -EBUSY if it detects a conflict of the flag
 * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
 * Note that old_hash and new_hash has below meanings
 *  - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
 *  - If the hash is EMPTY_HASH, it hits nothing
 *  - Anything else hits the recs which match the hash entries.
 */
static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
					 struct ftrace_hash *old_hash,
					 struct ftrace_hash *new_hash)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec, *end = NULL;
	int in_old, in_new;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return 0;

	if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
		return 0;

	/*
	 * Since the IPMODIFY is a very address sensitive action, we do not
	 * allow ftrace_ops to set all functions to new hash.
	 */
	if (!new_hash || !old_hash)
		return -EINVAL;

	/* Update rec->flags */
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		/* We need to update only differences of filter_hash */
		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
		if (in_old == in_new)
			continue;

		if (in_new) {
			/* New entries must ensure no others are using it */
			if (rec->flags & FTRACE_FL_IPMODIFY)
				goto rollback;
			rec->flags |= FTRACE_FL_IPMODIFY;
		} else /* Removed entry */
			rec->flags &= ~FTRACE_FL_IPMODIFY;
	} while_for_each_ftrace_rec();

	return 0;

rollback:
	end = rec;

	/* Roll back what we did above */
	do_for_each_ftrace_rec(pg, rec) {
1891 1892 1893 1894

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		if (rec == end)
			goto err_out;

		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
		if (in_old == in_new)
			continue;

		if (in_new)
			rec->flags &= ~FTRACE_FL_IPMODIFY;
		else
			rec->flags |= FTRACE_FL_IPMODIFY;
	} while_for_each_ftrace_rec();

err_out:
	return -EBUSY;
}

static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
{
	struct ftrace_hash *hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(hash))
		hash = NULL;

	return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
}

/* Disabling always succeeds */
static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
{
	struct ftrace_hash *hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(hash))
		hash = NULL;

	__ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
}

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash)
{
	struct ftrace_hash *old_hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(old_hash))
		old_hash = NULL;

	if (ftrace_hash_empty(new_hash))
		new_hash = NULL;

	return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
}

static void print_ip_ins(const char *fmt, const unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

1958 1959
static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
1960 1961
static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
1962

1963
enum ftrace_bug_type ftrace_bug_type;
1964
const void *ftrace_expected;
1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985

static void print_bug_type(void)
{
	switch (ftrace_bug_type) {
	case FTRACE_BUG_UNKNOWN:
		break;
	case FTRACE_BUG_INIT:
		pr_info("Initializing ftrace call sites\n");
		break;
	case FTRACE_BUG_NOP:
		pr_info("Setting ftrace call site to NOP\n");
		break;
	case FTRACE_BUG_CALL:
		pr_info("Setting ftrace call site to call ftrace function\n");
		break;
	case FTRACE_BUG_UPDATE:
		pr_info("Updating ftrace call site to call a different ftrace function\n");
		break;
	}
}

1986 1987 1988
/**
 * ftrace_bug - report and shutdown function tracer
 * @failed: The failed type (EFAULT, EINVAL, EPERM)
1989
 * @rec: The record that failed
1990 1991 1992 1993 1994 1995 1996 1997
 *
 * The arch code that enables or disables the function tracing
 * can call ftrace_bug() when it has detected a problem in
 * modifying the code. @failed should be one of either:
 * EFAULT - if the problem happens on reading the @ip address
 * EINVAL - if what is read at @ip is not what was expected
 * EPERM - if the problem happens on writting to the @ip address
 */
1998
void ftrace_bug(int failed, struct dyn_ftrace *rec)
1999
{
2000 2001
	unsigned long ip = rec ? rec->ip : 0;

2002 2003 2004 2005 2006 2007 2008 2009 2010 2011
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
2012
		print_ip_ins(" actual:   ", (unsigned char *)ip);
2013
		pr_cont("\n");
2014 2015 2016 2017
		if (ftrace_expected) {
			print_ip_ins(" expected: ", ftrace_expected);
			pr_cont("\n");
		}
2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
2029
	print_bug_type();
2030 2031 2032 2033 2034 2035 2036 2037
	if (rec) {
		struct ftrace_ops *ops = NULL;

		pr_info("ftrace record flags: %lx\n", rec->flags);
		pr_cont(" (%ld)%s", ftrace_rec_count(rec),
			rec->flags & FTRACE_FL_REGS ? " R" : "  ");
		if (rec->flags & FTRACE_FL_TRAMP_EN) {
			ops = ftrace_find_tramp_ops_any(rec);
2038 2039 2040 2041 2042 2043 2044 2045
			if (ops) {
				do {
					pr_cont("\ttramp: %pS (%pS)",
						(void *)ops->trampoline,
						(void *)ops->func);
					ops = ftrace_find_tramp_ops_next(rec, ops);
				} while (ops);
			} else
2046 2047 2048 2049
				pr_cont("\ttramp: ERROR!");

		}
		ip = ftrace_get_addr_curr(rec);
2050
		pr_cont("\n expected tramp: %lx\n", ip);
2051
	}
2052 2053
}

2054
static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
2055
{
2056
	unsigned long flag = 0UL;
2057

2058 2059
	ftrace_bug_type = FTRACE_BUG_UNKNOWN;

2060 2061 2062
	if (rec->flags & FTRACE_FL_DISABLED)
		return FTRACE_UPDATE_IGNORE;

S
Steven Rostedt 已提交
2063
	/*
2064
	 * If we are updating calls:
S
Steven Rostedt 已提交
2065
	 *
2066 2067
	 *   If the record has a ref count, then we need to enable it
	 *   because someone is using it.
S
Steven Rostedt 已提交
2068
	 *
2069 2070
	 *   Otherwise we make sure its disabled.
	 *
2071
	 * If we are disabling calls, then disable all records that
2072
	 * are enabled.
S
Steven Rostedt 已提交
2073
	 */
2074
	if (enable && ftrace_rec_count(rec))
2075
		flag = FTRACE_FL_ENABLED;
S
Steven Rostedt 已提交
2076

2077
	/*
2078 2079 2080
	 * If enabling and the REGS flag does not match the REGS_EN, or
	 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
	 * this record. Set flags to fail the compare against ENABLED.
2081
	 */
2082 2083 2084 2085 2086 2087 2088 2089 2090
	if (flag) {
		if (!(rec->flags & FTRACE_FL_REGS) != 
		    !(rec->flags & FTRACE_FL_REGS_EN))
			flag |= FTRACE_FL_REGS;

		if (!(rec->flags & FTRACE_FL_TRAMP) != 
		    !(rec->flags & FTRACE_FL_TRAMP_EN))
			flag |= FTRACE_FL_TRAMP;
	}
2091

2092 2093
	/* If the state of this record hasn't changed, then do nothing */
	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
2094
		return FTRACE_UPDATE_IGNORE;
S
Steven Rostedt 已提交
2095

2096
	if (flag) {
2097 2098 2099 2100
		/* Save off if rec is being enabled (for return value) */
		flag ^= rec->flags & FTRACE_FL_ENABLED;

		if (update) {
2101
			rec->flags |= FTRACE_FL_ENABLED;
2102 2103 2104 2105 2106 2107
			if (flag & FTRACE_FL_REGS) {
				if (rec->flags & FTRACE_FL_REGS)
					rec->flags |= FTRACE_FL_REGS_EN;
				else
					rec->flags &= ~FTRACE_FL_REGS_EN;
			}
2108 2109 2110 2111 2112 2113
			if (flag & FTRACE_FL_TRAMP) {
				if (rec->flags & FTRACE_FL_TRAMP)
					rec->flags |= FTRACE_FL_TRAMP_EN;
				else
					rec->flags &= ~FTRACE_FL_TRAMP_EN;
			}
2114 2115 2116 2117 2118 2119 2120
		}

		/*
		 * If this record is being updated from a nop, then
		 *   return UPDATE_MAKE_CALL.
		 * Otherwise,
		 *   return UPDATE_MODIFY_CALL to tell the caller to convert
2121
		 *   from the save regs, to a non-save regs function or
2122
		 *   vice versa, or from a trampoline call.
2123
		 */
2124 2125
		if (flag & FTRACE_FL_ENABLED) {
			ftrace_bug_type = FTRACE_BUG_CALL;
2126
			return FTRACE_UPDATE_MAKE_CALL;
2127
		}
2128

2129
		ftrace_bug_type = FTRACE_BUG_UPDATE;
2130
		return FTRACE_UPDATE_MODIFY_CALL;
2131 2132
	}

2133 2134
	if (update) {
		/* If there's no more users, clear all flags */
2135
		if (!ftrace_rec_count(rec))
2136 2137
			rec->flags = 0;
		else
2138 2139 2140 2141 2142 2143
			/*
			 * Just disable the record, but keep the ops TRAMP
			 * and REGS states. The _EN flags must be disabled though.
			 */
			rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
					FTRACE_FL_REGS_EN);
2144
	}
2145

2146
	ftrace_bug_type = FTRACE_BUG_NOP;
2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175 2176
	return FTRACE_UPDATE_MAKE_NOP;
}

/**
 * ftrace_update_record, set a record that now is tracing or not
 * @rec: the record to update
 * @enable: set to 1 if the record is tracing, zero to force disable
 *
 * The records that represent all functions that can be traced need
 * to be updated when tracing has been enabled.
 */
int ftrace_update_record(struct dyn_ftrace *rec, int enable)
{
	return ftrace_check_record(rec, enable, 1);
}

/**
 * ftrace_test_record, check if the record has been enabled or not
 * @rec: the record to test
 * @enable: set to 1 to check if enabled, 0 if it is disabled
 *
 * The arch code may need to test if a record is already set to
 * tracing to determine how to modify the function code that it
 * represents.
 */
int ftrace_test_record(struct dyn_ftrace *rec, int enable)
{
	return ftrace_check_record(rec, enable, 0);
}

2177 2178 2179 2180
static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
2181
	unsigned long ip = rec->ip;
2182 2183 2184 2185 2186 2187

	do_for_each_ftrace_op(op, ftrace_ops_list) {

		if (!op->trampoline)
			continue;

2188
		if (hash_contains_ip(ip, op->func_hash))
2189 2190 2191 2192 2193 2194
			return op;
	} while_for_each_ftrace_op(op);

	return NULL;
}

2195 2196 2197 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211 2212
static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
			   struct ftrace_ops *op)
{
	unsigned long ip = rec->ip;

	while_for_each_ftrace_op(op) {

		if (!op->trampoline)
			continue;

		if (hash_contains_ip(ip, op->func_hash))
			return op;
	} 

	return NULL;
}

2213 2214 2215 2216
static struct ftrace_ops *
ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
2217
	unsigned long ip = rec->ip;
2218

2219 2220 2221 2222 2223 2224 2225 2226
	/*
	 * Need to check removed ops first.
	 * If they are being removed, and this rec has a tramp,
	 * and this rec is in the ops list, then it would be the
	 * one with the tramp.
	 */
	if (removed_ops) {
		if (hash_contains_ip(ip, &removed_ops->old_hash))
2227 2228 2229
			return removed_ops;
	}

2230 2231 2232 2233 2234 2235 2236
	/*
	 * Need to find the current trampoline for a rec.
	 * Now, a trampoline is only attached to a rec if there
	 * was a single 'ops' attached to it. But this can be called
	 * when we are adding another op to the rec or removing the
	 * current one. Thus, if the op is being added, we can
	 * ignore it because it hasn't attached itself to the rec
2237 2238 2239 2240 2241 2242 2243 2244 2245 2246
	 * yet.
	 *
	 * If an ops is being modified (hooking to different functions)
	 * then we don't care about the new functions that are being
	 * added, just the old ones (that are probably being removed).
	 *
	 * If we are adding an ops to a function that already is using
	 * a trampoline, it needs to be removed (trampolines are only
	 * for single ops connected), then an ops that is not being
	 * modified also needs to be checked.
2247
	 */
2248
	do_for_each_ftrace_op(op, ftrace_ops_list) {
2249 2250 2251 2252 2253 2254 2255 2256 2257

		if (!op->trampoline)
			continue;

		/*
		 * If the ops is being added, it hasn't gotten to
		 * the point to be removed from this tree yet.
		 */
		if (op->flags & FTRACE_OPS_FL_ADDING)
2258 2259
			continue;

2260

2261
		/*
2262 2263 2264
		 * If the ops is being modified and is in the old
		 * hash, then it is probably being removed from this
		 * function.
2265 2266 2267
		 */
		if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
		    hash_contains_ip(ip, &op->old_hash))
2268
			return op;
2269 2270 2271 2272 2273 2274 2275 2276
		/*
		 * If the ops is not being added or modified, and it's
		 * in its normal filter hash, then this must be the one
		 * we want!
		 */
		if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
		    hash_contains_ip(ip, op->func_hash))
			return op;
2277 2278 2279 2280 2281 2282 2283 2284 2285 2286

	} while_for_each_ftrace_op(op);

	return NULL;
}

static struct ftrace_ops *
ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
2287
	unsigned long ip = rec->ip;
2288 2289 2290

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* pass rec in as regs to have non-NULL val */
2291
		if (hash_contains_ip(ip, op->func_hash))
2292 2293 2294 2295 2296 2297
			return op;
	} while_for_each_ftrace_op(op);

	return NULL;
}

2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309
/**
 * ftrace_get_addr_new - Get the call address to set to
 * @rec:  The ftrace record descriptor
 *
 * If the record has the FTRACE_FL_REGS set, that means that it
 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
 * is not not set, then it wants to convert to the normal callback.
 *
 * Returns the address of the trampoline to set to
 */
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
{
2310 2311 2312 2313 2314 2315
	struct ftrace_ops *ops;

	/* Trampolines take precedence over regs */
	if (rec->flags & FTRACE_FL_TRAMP) {
		ops = ftrace_find_tramp_ops_new(rec);
		if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
2316 2317
			pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
				(void *)rec->ip, (void *)rec->ip, rec->flags);
2318 2319 2320 2321 2322 2323
			/* Ftrace is shutting down, return anything */
			return (unsigned long)FTRACE_ADDR;
		}
		return ops->trampoline;
	}

2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341
	if (rec->flags & FTRACE_FL_REGS)
		return (unsigned long)FTRACE_REGS_ADDR;
	else
		return (unsigned long)FTRACE_ADDR;
}

/**
 * ftrace_get_addr_curr - Get the call address that is already there
 * @rec:  The ftrace record descriptor
 *
 * The FTRACE_FL_REGS_EN is set when the record already points to
 * a function that saves all the regs. Basically the '_EN' version
 * represents the current state of the function.
 *
 * Returns the address of the trampoline that is currently being called
 */
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
{
2342 2343 2344 2345 2346 2347
	struct ftrace_ops *ops;

	/* Trampolines take precedence over regs */
	if (rec->flags & FTRACE_FL_TRAMP_EN) {
		ops = ftrace_find_tramp_ops_curr(rec);
		if (FTRACE_WARN_ON(!ops)) {
2348 2349
			pr_warn("Bad trampoline accounting at: %p (%pS)\n",
				(void *)rec->ip, (void *)rec->ip);
2350 2351 2352 2353 2354 2355
			/* Ftrace is shutting down, return anything */
			return (unsigned long)FTRACE_ADDR;
		}
		return ops->trampoline;
	}

2356 2357 2358 2359 2360 2361
	if (rec->flags & FTRACE_FL_REGS_EN)
		return (unsigned long)FTRACE_REGS_ADDR;
	else
		return (unsigned long)FTRACE_ADDR;
}

2362 2363 2364
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
2365
	unsigned long ftrace_old_addr;
2366 2367 2368
	unsigned long ftrace_addr;
	int ret;

2369
	ftrace_addr = ftrace_get_addr_new(rec);
2370

2371 2372 2373 2374
	/* This needs to be done before we call ftrace_update_record */
	ftrace_old_addr = ftrace_get_addr_curr(rec);

	ret = ftrace_update_record(rec, enable);
2375

2376 2377
	ftrace_bug_type = FTRACE_BUG_UNKNOWN;

2378 2379 2380 2381 2382
	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MAKE_CALL:
2383
		ftrace_bug_type = FTRACE_BUG_CALL;
2384
		return ftrace_make_call(rec, ftrace_addr);
2385 2386

	case FTRACE_UPDATE_MAKE_NOP:
2387
		ftrace_bug_type = FTRACE_BUG_NOP;
2388
		return ftrace_make_nop(NULL, rec, ftrace_old_addr);
2389 2390

	case FTRACE_UPDATE_MODIFY_CALL:
2391
		ftrace_bug_type = FTRACE_BUG_UPDATE;
2392
		return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
2393 2394
	}

2395
	return -1; /* unknow ftrace bug */
2396 2397
}

2398
void __weak ftrace_replace_code(int enable)
2399 2400 2401
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
S
Steven Rostedt 已提交
2402
	int failed;
2403

2404 2405 2406
	if (unlikely(ftrace_disabled))
		return;

2407
	do_for_each_ftrace_rec(pg, rec) {
2408 2409 2410 2411

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

2412
		failed = __ftrace_replace_code(rec, enable);
2413
		if (failed) {
2414
			ftrace_bug(failed, rec);
2415 2416
			/* Stop processing */
			return;
2417
		}
2418
	} while_for_each_ftrace_rec();
2419 2420
}

2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492
struct ftrace_rec_iter {
	struct ftrace_page	*pg;
	int			index;
};

/**
 * ftrace_rec_iter_start, start up iterating over traced functions
 *
 * Returns an iterator handle that is used to iterate over all
 * the records that represent address locations where functions
 * are traced.
 *
 * May return NULL if no records are available.
 */
struct ftrace_rec_iter *ftrace_rec_iter_start(void)
{
	/*
	 * We only use a single iterator.
	 * Protected by the ftrace_lock mutex.
	 */
	static struct ftrace_rec_iter ftrace_rec_iter;
	struct ftrace_rec_iter *iter = &ftrace_rec_iter;

	iter->pg = ftrace_pages_start;
	iter->index = 0;

	/* Could have empty pages */
	while (iter->pg && !iter->pg->index)
		iter->pg = iter->pg->next;

	if (!iter->pg)
		return NULL;

	return iter;
}

/**
 * ftrace_rec_iter_next, get the next record to process.
 * @iter: The handle to the iterator.
 *
 * Returns the next iterator after the given iterator @iter.
 */
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
{
	iter->index++;

	if (iter->index >= iter->pg->index) {
		iter->pg = iter->pg->next;
		iter->index = 0;

		/* Could have empty pages */
		while (iter->pg && !iter->pg->index)
			iter->pg = iter->pg->next;
	}

	if (!iter->pg)
		return NULL;

	return iter;
}

/**
 * ftrace_rec_iter_record, get the record at the iterator location
 * @iter: The current iterator location
 *
 * Returns the record that the current @iter is at.
 */
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
{
	return &iter->pg->records[iter->index];
}

2493
static int
2494
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
2495
{
2496
	int ret;
2497

2498 2499 2500
	if (unlikely(ftrace_disabled))
		return 0;

2501
	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
2502
	if (ret) {
2503
		ftrace_bug_type = FTRACE_BUG_INIT;
2504
		ftrace_bug(ret, rec);
2505
		return 0;
2506
	}
2507
	return 1;
2508 2509
}

2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527
/*
 * archs can override this function if they must do something
 * before the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
	return 0;
}

/*
 * archs can override this function if they must do something
 * after the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
	return 0;
}

2528
void ftrace_modify_all_code(int command)
2529
{
2530
	int update = command & FTRACE_UPDATE_TRACE_FUNC;
2531
	int err = 0;
2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542

	/*
	 * If the ftrace_caller calls a ftrace_ops func directly,
	 * we need to make sure that it only traces functions it
	 * expects to trace. When doing the switch of functions,
	 * we need to update to the ftrace_ops_list_func first
	 * before the transition between old and new calls are set,
	 * as the ftrace_ops_list_func will check the ops hashes
	 * to make sure the ops are having the right functions
	 * traced.
	 */
2543 2544 2545 2546 2547
	if (update) {
		err = ftrace_update_ftrace_func(ftrace_ops_list_func);
		if (FTRACE_WARN_ON(err))
			return;
	}
2548

2549
	if (command & FTRACE_UPDATE_CALLS)
2550
		ftrace_replace_code(1);
2551
	else if (command & FTRACE_DISABLE_CALLS)
2552 2553
		ftrace_replace_code(0);

2554 2555 2556 2557 2558 2559
	if (update && ftrace_trace_function != ftrace_ops_list_func) {
		function_trace_op = set_function_trace_op;
		smp_wmb();
		/* If irqs are disabled, we are in stop machine */
		if (!irqs_disabled())
			smp_call_function(ftrace_sync_ipi, NULL, 1);
2560 2561 2562
		err = ftrace_update_ftrace_func(ftrace_trace_function);
		if (FTRACE_WARN_ON(err))
			return;
2563
	}
2564

2565
	if (command & FTRACE_START_FUNC_RET)
2566
		err = ftrace_enable_ftrace_graph_caller();
2567
	else if (command & FTRACE_STOP_FUNC_RET)
2568 2569
		err = ftrace_disable_ftrace_graph_caller();
	FTRACE_WARN_ON(err);
2570 2571 2572 2573 2574 2575 2576
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	ftrace_modify_all_code(*command);
2577

2578
	return 0;
2579 2580
}

2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604
/**
 * ftrace_run_stop_machine, go back to the stop machine method
 * @command: The command to tell ftrace what to do
 *
 * If an arch needs to fall back to the stop machine method, the
 * it can call this function.
 */
void ftrace_run_stop_machine(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

/**
 * arch_ftrace_update_code, modify the code to trace or not trace
 * @command: The command that needs to be done
 *
 * Archs can override this function if it does not need to
 * run stop_machine() to modify code.
 */
void __weak arch_ftrace_update_code(int command)
{
	ftrace_run_stop_machine(command);
}

I
Ingo Molnar 已提交
2605
static void ftrace_run_update_code(int command)
2606
{
2607 2608 2609 2610 2611 2612 2613
	int ret;

	ret = ftrace_arch_code_modify_prepare();
	FTRACE_WARN_ON(ret);
	if (ret)
		return;

2614 2615 2616 2617 2618 2619 2620 2621
	/*
	 * By default we use stop_machine() to modify the code.
	 * But archs can do what ever they want as long as it
	 * is safe. The stop_machine() is the safest, but also
	 * produces the most overhead.
	 */
	arch_ftrace_update_code(command);

2622 2623
	ret = ftrace_arch_code_modify_post_process();
	FTRACE_WARN_ON(ret);
2624 2625
}

2626
static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
2627
				   struct ftrace_ops_hash *old_hash)
2628 2629
{
	ops->flags |= FTRACE_OPS_FL_MODIFYING;
2630 2631
	ops->old_hash.filter_hash = old_hash->filter_hash;
	ops->old_hash.notrace_hash = old_hash->notrace_hash;
2632
	ftrace_run_update_code(command);
2633
	ops->old_hash.filter_hash = NULL;
2634
	ops->old_hash.notrace_hash = NULL;
2635 2636 2637
	ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
}

2638
static ftrace_func_t saved_ftrace_func;
2639
static int ftrace_start_up;
2640

2641 2642 2643 2644
void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
}

2645
static void per_cpu_ops_free(struct ftrace_ops *ops)
2646 2647 2648 2649
{
	free_percpu(ops->disabled);
}

2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661
static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}
2662

2663 2664 2665 2666 2667 2668 2669
static void ftrace_startup_all(int command)
{
	update_all_ops = true;
	ftrace_startup_enable(command);
	update_all_ops = false;
}

2670
static int ftrace_startup(struct ftrace_ops *ops, int command)
2671
{
2672
	int ret;
2673

2674
	if (unlikely(ftrace_disabled))
2675
		return -ENODEV;
2676

2677 2678 2679 2680
	ret = __register_ftrace_function(ops);
	if (ret)
		return ret;

2681
	ftrace_start_up++;
2682

2683 2684 2685 2686 2687 2688 2689 2690 2691
	/*
	 * Note that ftrace probes uses this to start up
	 * and modify functions it will probe. But we still
	 * set the ADDING flag for modification, as probes
	 * do not have trampolines. If they add them in the
	 * future, then the probes will need to distinguish
	 * between adding and updating probes.
	 */
	ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
2692

2693 2694 2695 2696 2697 2698 2699 2700 2701
	ret = ftrace_hash_ipmodify_enable(ops);
	if (ret < 0) {
		/* Rollback registration process */
		__unregister_ftrace_function(ops);
		ftrace_start_up--;
		ops->flags &= ~FTRACE_OPS_FL_ENABLED;
		return ret;
	}

2702 2703
	if (ftrace_hash_rec_enable(ops, 1))
		command |= FTRACE_UPDATE_CALLS;
2704

2705
	ftrace_startup_enable(command);
2706

2707 2708
	ops->flags &= ~FTRACE_OPS_FL_ADDING;

2709
	return 0;
2710 2711
}

2712
static int ftrace_shutdown(struct ftrace_ops *ops, int command)
2713
{
2714
	int ret;
2715

2716
	if (unlikely(ftrace_disabled))
2717 2718 2719 2720 2721
		return -ENODEV;

	ret = __unregister_ftrace_function(ops);
	if (ret)
		return ret;
2722

2723
	ftrace_start_up--;
2724 2725 2726 2727 2728 2729 2730
	/*
	 * Just warn in case of unbalance, no need to kill ftrace, it's not
	 * critical but the ftrace_call callers may be never nopped again after
	 * further ftrace uses.
	 */
	WARN_ON_ONCE(ftrace_start_up < 0);

2731 2732
	/* Disabling ipmodify never fails */
	ftrace_hash_ipmodify_disable(ops);
2733

2734 2735
	if (ftrace_hash_rec_disable(ops, 1))
		command |= FTRACE_UPDATE_CALLS;
2736

2737
	ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2738

2739 2740 2741 2742
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}
2743

2744 2745
	if (!command || !ftrace_enabled) {
		/*
2746
		 * If these are per_cpu ops, they still need their
2747 2748 2749 2750
		 * per_cpu field freed. Since, function tracing is
		 * not currently active, we can just free them
		 * without synchronizing all CPUs.
		 */
2751 2752
		if (ops->flags & FTRACE_OPS_FL_PER_CPU)
			per_cpu_ops_free(ops);
2753
		return 0;
2754
	}
2755

2756 2757 2758 2759
	/*
	 * If the ops uses a trampoline, then it needs to be
	 * tested first on update.
	 */
2760
	ops->flags |= FTRACE_OPS_FL_REMOVING;
2761 2762
	removed_ops = ops;

2763 2764 2765 2766
	/* The trampoline logic checks the old hashes */
	ops->old_hash.filter_hash = ops->func_hash->filter_hash;
	ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;

2767
	ftrace_run_update_code(command);
2768

2769 2770 2771 2772 2773 2774 2775 2776 2777
	/*
	 * If there's no more ops registered with ftrace, run a
	 * sanity check to make sure all rec flags are cleared.
	 */
	if (ftrace_ops_list == &ftrace_list_end) {
		struct ftrace_page *pg;
		struct dyn_ftrace *rec;

		do_for_each_ftrace_rec(pg, rec) {
2778
			if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED))
2779 2780 2781 2782 2783
				pr_warn("  %pS flags:%lx\n",
					(void *)rec->ip, rec->flags);
		} while_for_each_ftrace_rec();
	}

2784 2785 2786 2787
	ops->old_hash.filter_hash = NULL;
	ops->old_hash.notrace_hash = NULL;

	removed_ops = NULL;
2788
	ops->flags &= ~FTRACE_OPS_FL_REMOVING;
2789

2790 2791 2792
	/*
	 * Dynamic ops may be freed, we must make sure that all
	 * callers are done before leaving this function.
2793
	 * The same goes for freeing the per_cpu data of the per_cpu
2794 2795 2796 2797 2798 2799 2800 2801 2802 2803
	 * ops.
	 *
	 * Again, normal synchronize_sched() is not good enough.
	 * We need to do a hard force of sched synchronization.
	 * This is because we use preempt_disable() to do RCU, but
	 * the function tracers can be called where RCU is not watching
	 * (like before user_exit()). We can not rely on the RCU
	 * infrastructure to do the synchronization, thus we must do it
	 * ourselves.
	 */
2804
	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU)) {
2805 2806
		schedule_on_each_cpu(ftrace_sync);

2807 2808
		arch_ftrace_trampoline_free(ops);

2809 2810
		if (ops->flags & FTRACE_OPS_FL_PER_CPU)
			per_cpu_ops_free(ops);
2811 2812
	}

2813
	return 0;
2814 2815
}

I
Ingo Molnar 已提交
2816
static void ftrace_startup_sysctl(void)
2817
{
2818 2819
	int command;

2820 2821 2822
	if (unlikely(ftrace_disabled))
		return;

2823 2824
	/* Force update next time */
	saved_ftrace_func = NULL;
2825
	/* ftrace_start_up is true if we want ftrace running */
2826 2827 2828 2829
	if (ftrace_start_up) {
		command = FTRACE_UPDATE_CALLS;
		if (ftrace_graph_active)
			command |= FTRACE_START_FUNC_RET;
2830
		ftrace_startup_enable(command);
2831
	}
2832 2833
}

I
Ingo Molnar 已提交
2834
static void ftrace_shutdown_sysctl(void)
2835
{
2836 2837
	int command;

2838 2839 2840
	if (unlikely(ftrace_disabled))
		return;

2841
	/* ftrace_start_up is true if ftrace is running */
2842 2843 2844 2845 2846 2847
	if (ftrace_start_up) {
		command = FTRACE_DISABLE_CALLS;
		if (ftrace_graph_active)
			command |= FTRACE_STOP_FUNC_RET;
		ftrace_run_update_code(command);
	}
2848 2849
}

2850
static u64		ftrace_update_time;
2851 2852
unsigned long		ftrace_update_tot_cnt;

2853
static inline int ops_traces_mod(struct ftrace_ops *ops)
2854
{
2855 2856 2857 2858
	/*
	 * Filter_hash being empty will default to trace module.
	 * But notrace hash requires a test of individual module functions.
	 */
2859 2860
	return ftrace_hash_empty(ops->func_hash->filter_hash) &&
		ftrace_hash_empty(ops->func_hash->notrace_hash);
2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876
}

/*
 * Check if the current ops references the record.
 *
 * If the ops traces all functions, then it was already accounted for.
 * If the ops does not trace the current record function, skip it.
 * If the ops ignores the function via notrace filter, skip it.
 */
static inline bool
ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	/* If ops isn't enabled, ignore it */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return 0;

2877
	/* If ops traces all then it includes this function */
2878
	if (ops_traces_mod(ops))
2879
		return 1;
2880 2881

	/* The function must be in the filter */
2882 2883
	if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
	    !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
2884
		return 0;
2885

2886
	/* If in notrace hash, we ignore it too */
2887
	if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
2888 2889 2890 2891 2892
		return 0;

	return 1;
}

2893
static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
2894
{
2895
	struct ftrace_page *pg;
2896
	struct dyn_ftrace *p;
2897
	u64 start, stop;
2898
	unsigned long update_cnt = 0;
2899
	unsigned long rec_flags = 0;
2900
	int i;
2901

2902 2903
	start = ftrace_now(raw_smp_processor_id());

2904
	/*
2905 2906 2907 2908 2909 2910 2911 2912 2913
	 * When a module is loaded, this function is called to convert
	 * the calls to mcount in its text to nops, and also to create
	 * an entry in the ftrace data. Now, if ftrace is activated
	 * after this call, but before the module sets its text to
	 * read-only, the modification of enabling ftrace can fail if
	 * the read-only is done while ftrace is converting the calls.
	 * To prevent this, the module's records are set as disabled
	 * and will be enabled after the call to set the module's text
	 * to read-only.
2914
	 */
2915 2916
	if (mod)
		rec_flags |= FTRACE_FL_DISABLED;
2917

2918
	for (pg = new_pgs; pg; pg = pg->next) {
2919

2920
		for (i = 0; i < pg->index; i++) {
2921

2922 2923 2924
			/* If something went wrong, bail without enabling anything */
			if (unlikely(ftrace_disabled))
				return -1;
2925

2926
			p = &pg->records[i];
2927
			p->flags = rec_flags;
2928

2929 2930 2931 2932 2933 2934
			/*
			 * Do the initial record conversion from mcount jump
			 * to the NOP instructions.
			 */
			if (!ftrace_code_disable(mod, p))
				break;
2935

2936
			update_cnt++;
2937
		}
2938 2939
	}

I
Ingo Molnar 已提交
2940
	stop = ftrace_now(raw_smp_processor_id());
2941
	ftrace_update_time = stop - start;
2942
	ftrace_update_tot_cnt += update_cnt;
2943

2944 2945 2946
	return 0;
}

2947
static int ftrace_allocate_records(struct ftrace_page *pg, int count)
2948
{
2949
	int order;
2950 2951
	int cnt;

2952 2953 2954 2955
	if (WARN_ON(!count))
		return -EINVAL;

	order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
2956 2957

	/*
2958 2959
	 * We want to fill as much as possible. No more than a page
	 * may be empty.
2960
	 */
2961 2962
	while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
		order--;
2963

2964 2965
 again:
	pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
2966

2967 2968 2969 2970 2971 2972 2973
	if (!pg->records) {
		/* if we can't allocate this size, try something smaller */
		if (!order)
			return -ENOMEM;
		order >>= 1;
		goto again;
	}
2974

2975 2976
	cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
	pg->size = cnt;
2977

2978 2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010
	if (cnt > count)
		cnt = count;

	return cnt;
}

static struct ftrace_page *
ftrace_allocate_pages(unsigned long num_to_init)
{
	struct ftrace_page *start_pg;
	struct ftrace_page *pg;
	int order;
	int cnt;

	if (!num_to_init)
		return 0;

	start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
	if (!pg)
		return NULL;

	/*
	 * Try to allocate as much as possible in one continues
	 * location that fills in all of the space. We want to
	 * waste as little space as possible.
	 */
	for (;;) {
		cnt = ftrace_allocate_records(pg, num_to_init);
		if (cnt < 0)
			goto free_pages;

		num_to_init -= cnt;
		if (!num_to_init)
3011 3012
			break;

3013 3014 3015 3016
		pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
		if (!pg->next)
			goto free_pages;

3017 3018 3019
		pg = pg->next;
	}

3020 3021 3022
	return start_pg;

 free_pages:
3023 3024
	pg = start_pg;
	while (pg) {
3025 3026 3027 3028 3029 3030 3031 3032 3033 3034
		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
		free_pages((unsigned long)pg->records, order);
		start_pg = pg->next;
		kfree(pg);
		pg = start_pg;
	}
	pr_info("ftrace: FAILED to allocate memory for functions\n");
	return NULL;
}

3035 3036 3037
#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
3038
	loff_t				pos;
3039 3040 3041 3042 3043
	loff_t				func_pos;
	struct ftrace_page		*pg;
	struct dyn_ftrace		*func;
	struct ftrace_func_probe	*probe;
	struct trace_parser		parser;
3044
	struct ftrace_hash		*hash;
3045
	struct ftrace_ops		*ops;
3046 3047 3048
	int				hidx;
	int				idx;
	unsigned			flags;
3049 3050
};

3051
static void *
3052
t_hash_next(struct seq_file *m, loff_t *pos)
3053 3054
{
	struct ftrace_iterator *iter = m->private;
3055
	struct hlist_node *hnd = NULL;
3056 3057 3058
	struct hlist_head *hhd;

	(*pos)++;
3059
	iter->pos = *pos;
3060

3061 3062
	if (iter->probe)
		hnd = &iter->probe->node;
3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084
 retry:
	if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
		return NULL;

	hhd = &ftrace_func_hash[iter->hidx];

	if (hlist_empty(hhd)) {
		iter->hidx++;
		hnd = NULL;
		goto retry;
	}

	if (!hnd)
		hnd = hhd->first;
	else {
		hnd = hnd->next;
		if (!hnd) {
			iter->hidx++;
			goto retry;
		}
	}

3085 3086 3087 3088 3089 3090
	if (WARN_ON_ONCE(!hnd))
		return NULL;

	iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);

	return iter;
3091 3092 3093 3094 3095 3096
}

static void *t_hash_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
L
Li Zefan 已提交
3097 3098
	loff_t l;

3099 3100 3101
	if (!(iter->flags & FTRACE_ITER_DO_HASH))
		return NULL;

3102 3103
	if (iter->func_pos > *pos)
		return NULL;
3104

L
Li Zefan 已提交
3105
	iter->hidx = 0;
3106
	for (l = 0; l <= (*pos - iter->func_pos); ) {
3107
		p = t_hash_next(m, &l);
L
Li Zefan 已提交
3108 3109 3110
		if (!p)
			break;
	}
3111 3112 3113
	if (!p)
		return NULL;

3114 3115 3116
	/* Only set this if we have an item */
	iter->flags |= FTRACE_ITER_HASH;

3117
	return iter;
3118 3119
}

3120 3121
static int
t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
3122
{
S
Steven Rostedt 已提交
3123
	struct ftrace_func_probe *rec;
3124

3125 3126 3127
	rec = iter->probe;
	if (WARN_ON_ONCE(!rec))
		return -EIO;
3128

3129 3130 3131
	if (rec->ops->print)
		return rec->ops->print(m, rec->ip, rec->ops, rec->data);

3132
	seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
3133 3134 3135 3136 3137 3138 3139 3140

	if (rec->data)
		seq_printf(m, ":%p", rec->data);
	seq_putc(m, '\n');

	return 0;
}

I
Ingo Molnar 已提交
3141
static void *
3142 3143 3144
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
3145
	struct ftrace_ops *ops = iter->ops;
3146 3147
	struct dyn_ftrace *rec = NULL;

3148 3149 3150
	if (unlikely(ftrace_disabled))
		return NULL;

3151
	if (iter->flags & FTRACE_ITER_HASH)
3152
		return t_hash_next(m, pos);
3153

3154
	(*pos)++;
3155
	iter->pos = iter->func_pos = *pos;
3156

3157
	if (iter->flags & FTRACE_ITER_PRINTALL)
3158
		return t_hash_start(m, pos);
3159

3160 3161 3162 3163 3164 3165 3166 3167 3168
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
3169
		if (((iter->flags & FTRACE_ITER_FILTER) &&
3170
		     !(ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))) ||
S
Steven Rostedt 已提交
3171

3172
		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
3173
		     !ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) ||
3174 3175

		    ((iter->flags & FTRACE_ITER_ENABLED) &&
3176
		     !(rec->flags & FTRACE_FL_ENABLED))) {
3177

3178 3179 3180 3181 3182
			rec = NULL;
			goto retry;
		}
	}

3183
	if (!rec)
3184
		return t_hash_start(m, pos);
3185 3186 3187 3188

	iter->func = rec;

	return iter;
3189 3190
}

3191 3192 3193 3194
static void reset_iter_read(struct ftrace_iterator *iter)
{
	iter->pos = 0;
	iter->func_pos = 0;
3195
	iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
3196 3197 3198 3199 3200
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
3201
	struct ftrace_ops *ops = iter->ops;
3202
	void *p = NULL;
3203
	loff_t l;
3204

3205
	mutex_lock(&ftrace_lock);
3206 3207 3208 3209

	if (unlikely(ftrace_disabled))
		return NULL;

3210 3211 3212 3213 3214 3215
	/*
	 * If an lseek was done, then reset and start from beginning.
	 */
	if (*pos < iter->pos)
		reset_iter_read(iter);

3216 3217 3218 3219 3220
	/*
	 * For set_ftrace_filter reading, if we have the filter
	 * off, we can short cut and just print out that all
	 * functions are enabled.
	 */
3221
	if ((iter->flags & FTRACE_ITER_FILTER &&
3222
	     ftrace_hash_empty(ops->func_hash->filter_hash)) ||
3223
	    (iter->flags & FTRACE_ITER_NOTRACE &&
3224
	     ftrace_hash_empty(ops->func_hash->notrace_hash))) {
3225
		if (*pos > 0)
3226
			return t_hash_start(m, pos);
3227
		iter->flags |= FTRACE_ITER_PRINTALL;
3228 3229
		/* reset in case of seek/pread */
		iter->flags &= ~FTRACE_ITER_HASH;
3230 3231 3232
		return iter;
	}

3233 3234 3235
	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_start(m, pos);

3236 3237 3238 3239 3240
	/*
	 * Unfortunately, we need to restart at ftrace_pages_start
	 * every time we let go of the ftrace_mutex. This is because
	 * those pointers can change without the lock.
	 */
3241 3242 3243 3244 3245 3246
	iter->pg = ftrace_pages_start;
	iter->idx = 0;
	for (l = 0; l <= *pos; ) {
		p = t_next(m, p, &l);
		if (!p)
			break;
3247
	}
3248

3249 3250
	if (!p)
		return t_hash_start(m, pos);
3251 3252

	return iter;
3253 3254 3255 3256
}

static void t_stop(struct seq_file *m, void *p)
{
3257
	mutex_unlock(&ftrace_lock);
3258 3259
}

3260 3261 3262 3263 3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275
void * __weak
arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	return NULL;
}

static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
				struct dyn_ftrace *rec)
{
	void *ptr;

	ptr = arch_ftrace_trampoline_func(ops, rec);
	if (ptr)
		seq_printf(m, " ->%pS", ptr);
}

3276 3277
static int t_show(struct seq_file *m, void *v)
{
3278
	struct ftrace_iterator *iter = m->private;
3279
	struct dyn_ftrace *rec;
3280

3281
	if (iter->flags & FTRACE_ITER_HASH)
3282
		return t_hash_show(m, iter);
3283

3284
	if (iter->flags & FTRACE_ITER_PRINTALL) {
3285
		if (iter->flags & FTRACE_ITER_NOTRACE)
3286
			seq_puts(m, "#### no functions disabled ####\n");
3287
		else
3288
			seq_puts(m, "#### all functions enabled ####\n");
3289 3290 3291
		return 0;
	}

3292 3293
	rec = iter->func;

3294 3295 3296
	if (!rec)
		return 0;

3297
	seq_printf(m, "%ps", (void *)rec->ip);
3298
	if (iter->flags & FTRACE_ITER_ENABLED) {
3299
		struct ftrace_ops *ops;
3300

3301
		seq_printf(m, " (%ld)%s%s",
3302
			   ftrace_rec_count(rec),
3303 3304
			   rec->flags & FTRACE_FL_REGS ? " R" : "  ",
			   rec->flags & FTRACE_FL_IPMODIFY ? " I" : "  ");
3305
		if (rec->flags & FTRACE_FL_TRAMP_EN) {
3306
			ops = ftrace_find_tramp_ops_any(rec);
3307 3308 3309 3310 3311
			if (ops) {
				do {
					seq_printf(m, "\ttramp: %pS (%pS)",
						   (void *)ops->trampoline,
						   (void *)ops->func);
3312
					add_trampoline_func(m, ops, rec);
3313 3314 3315
					ops = ftrace_find_tramp_ops_next(rec, ops);
				} while (ops);
			} else
3316
				seq_puts(m, "\ttramp: ERROR!");
3317 3318
		} else {
			add_trampoline_func(m, NULL, rec);
3319 3320 3321
		}
	}	

3322
	seq_putc(m, '\n');
3323 3324 3325 3326

	return 0;
}

J
James Morris 已提交
3327
static const struct seq_operations show_ftrace_seq_ops = {
3328 3329 3330 3331 3332 3333
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

I
Ingo Molnar 已提交
3334
static int
3335 3336 3337 3338
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;

3339 3340 3341
	if (unlikely(ftrace_disabled))
		return -ENODEV;

3342 3343 3344 3345
	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
	if (iter) {
		iter->pg = ftrace_pages_start;
		iter->ops = &global_ops;
I
Ingo Molnar 已提交
3346
	}
3347

3348
	return iter ? 0 : -ENOMEM;
3349 3350
}

3351 3352 3353 3354 3355
static int
ftrace_enabled_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;

3356 3357 3358 3359 3360
	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
	if (iter) {
		iter->pg = ftrace_pages_start;
		iter->flags = FTRACE_ITER_ENABLED;
		iter->ops = &global_ops;
3361 3362
	}

3363
	return iter ? 0 : -ENOMEM;
3364 3365
}

3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378
/**
 * ftrace_regex_open - initialize function tracer filter files
 * @ops: The ftrace_ops that hold the hash filters
 * @flag: The type of filter to process
 * @inode: The inode, usually passed in to your open routine
 * @file: The file, usually passed in to your open routine
 *
 * ftrace_regex_open() initializes the filter files for the
 * @ops. Depending on @flag it may process the filter hash or
 * the notrace hash of @ops. With this called from the open
 * routine, you can use ftrace_filter_write() for the write
 * routine if @flag has FTRACE_ITER_FILTER set, or
 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
3379
 * tracing_lseek() should be used as the lseek routine, and
3380 3381 3382
 * release must call ftrace_regex_release().
 */
int
3383
ftrace_regex_open(struct ftrace_ops *ops, int flag,
3384
		  struct inode *inode, struct file *file)
3385 3386
{
	struct ftrace_iterator *iter;
3387
	struct ftrace_hash *hash;
3388 3389
	int ret = 0;

3390 3391
	ftrace_ops_init(ops);

3392 3393 3394
	if (unlikely(ftrace_disabled))
		return -ENODEV;

3395 3396 3397 3398
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

3399 3400 3401 3402 3403
	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
		kfree(iter);
		return -ENOMEM;
	}

3404 3405 3406
	iter->ops = ops;
	iter->flags = flag;

3407
	mutex_lock(&ops->func_hash->regex_lock);
3408

3409
	if (flag & FTRACE_ITER_NOTRACE)
3410
		hash = ops->func_hash->notrace_hash;
3411
	else
3412
		hash = ops->func_hash->filter_hash;
3413

3414
	if (file->f_mode & FMODE_WRITE) {
3415 3416 3417 3418 3419 3420 3421
		const int size_bits = FTRACE_HASH_DEFAULT_BITS;

		if (file->f_flags & O_TRUNC)
			iter->hash = alloc_ftrace_hash(size_bits);
		else
			iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);

3422 3423 3424
		if (!iter->hash) {
			trace_parser_put(&iter->parser);
			kfree(iter);
3425 3426
			ret = -ENOMEM;
			goto out_unlock;
3427 3428
		}
	}
3429

3430 3431 3432 3433 3434 3435 3436
	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
3437
		} else {
3438 3439
			/* Failed */
			free_ftrace_hash(iter->hash);
3440
			trace_parser_put(&iter->parser);
3441
			kfree(iter);
3442
		}
3443 3444
	} else
		file->private_data = iter;
3445 3446

 out_unlock:
3447
	mutex_unlock(&ops->func_hash->regex_lock);
3448 3449 3450 3451

	return ret;
}

3452 3453 3454
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
3455 3456 3457
	struct ftrace_ops *ops = inode->i_private;

	return ftrace_regex_open(ops,
3458 3459
			FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
			inode, file);
3460 3461 3462 3463 3464
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
3465 3466 3467
	struct ftrace_ops *ops = inode->i_private;

	return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
3468
				 inode, file);
3469 3470
}

3471 3472 3473 3474 3475 3476 3477
/* Type for quick search ftrace basic regexes (globs) from filter_parse_regex */
struct ftrace_glob {
	char *search;
	unsigned len;
	int type;
};

3478 3479 3480 3481 3482 3483 3484 3485 3486 3487
/*
 * If symbols in an architecture don't correspond exactly to the user-visible
 * name of what they represent, it is possible to define this function to
 * perform the necessary adjustments.
*/
char * __weak arch_ftrace_match_adjust(char *str, const char *search)
{
	return str;
}

3488
static int ftrace_match(char *str, struct ftrace_glob *g)
3489 3490
{
	int matched = 0;
3491
	int slen;
3492

3493 3494
	str = arch_ftrace_match_adjust(str, g->search);

3495
	switch (g->type) {
3496
	case MATCH_FULL:
3497
		if (strcmp(str, g->search) == 0)
3498 3499 3500
			matched = 1;
		break;
	case MATCH_FRONT_ONLY:
3501
		if (strncmp(str, g->search, g->len) == 0)
3502 3503 3504
			matched = 1;
		break;
	case MATCH_MIDDLE_ONLY:
3505
		if (strstr(str, g->search))
3506 3507 3508
			matched = 1;
		break;
	case MATCH_END_ONLY:
3509
		slen = strlen(str);
3510 3511
		if (slen >= g->len &&
		    memcmp(str + slen - g->len, g->search, g->len) == 0)
3512 3513
			matched = 1;
		break;
3514 3515 3516 3517
	case MATCH_GLOB:
		if (glob_match(g->search, str))
			matched = 1;
		break;
3518 3519 3520 3521 3522
	}

	return matched;
}

3523
static int
3524
enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter)
3525
{
3526 3527 3528
	struct ftrace_func_entry *entry;
	int ret = 0;

3529
	entry = ftrace_lookup_ip(hash, rec->ip);
3530
	if (clear_filter) {
3531 3532 3533
		/* Do nothing if it doesn't exist */
		if (!entry)
			return 0;
3534

3535
		free_hash_entry(hash, entry);
3536 3537 3538 3539
	} else {
		/* Do nothing if it exists */
		if (entry)
			return 0;
3540

3541
		ret = add_hash_entry(hash, rec->ip);
3542 3543
	}
	return ret;
3544 3545
}

3546
static int
D
Dmitry Safonov 已提交
3547 3548
ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
		struct ftrace_glob *mod_g, int exclude_mod)
3549 3550
{
	char str[KSYM_SYMBOL_LEN];
3551 3552 3553 3554
	char *modname;

	kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);

D
Dmitry Safonov 已提交
3555 3556 3557 3558 3559 3560 3561 3562 3563 3564 3565 3566 3567 3568 3569 3570 3571 3572 3573 3574
	if (mod_g) {
		int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;

		/* blank module name to match all modules */
		if (!mod_g->len) {
			/* blank module globbing: modname xor exclude_mod */
			if ((!exclude_mod) != (!modname))
				goto func_match;
			return 0;
		}

		/* not matching the module */
		if (!modname || !mod_matches) {
			if (exclude_mod)
				goto func_match;
			else
				return 0;
		}

		if (mod_matches && exclude_mod)
3575 3576
			return 0;

D
Dmitry Safonov 已提交
3577
func_match:
3578
		/* blank search means to match all funcs in the mod */
3579
		if (!func_g->len)
3580 3581
			return 1;
	}
3582

3583
	return ftrace_match(str, func_g);
3584 3585
}

3586
static int
3587
match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
3588 3589 3590
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
3591
	struct ftrace_glob func_g = { .type = MATCH_FULL };
D
Dmitry Safonov 已提交
3592 3593 3594
	struct ftrace_glob mod_g = { .type = MATCH_FULL };
	struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL;
	int exclude_mod = 0;
3595
	int found = 0;
3596
	int ret;
3597
	int clear_filter;
3598

D
Dmitry Safonov 已提交
3599
	if (func) {
3600 3601 3602
		func_g.type = filter_parse_regex(func, len, &func_g.search,
						 &clear_filter);
		func_g.len = strlen(func_g.search);
3603
	}
3604

D
Dmitry Safonov 已提交
3605 3606 3607 3608
	if (mod) {
		mod_g.type = filter_parse_regex(mod, strlen(mod),
				&mod_g.search, &exclude_mod);
		mod_g.len = strlen(mod_g.search);
3609
	}
3610

3611
	mutex_lock(&ftrace_lock);
3612

3613 3614
	if (unlikely(ftrace_disabled))
		goto out_unlock;
3615

3616
	do_for_each_ftrace_rec(pg, rec) {
3617 3618 3619 3620

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

D
Dmitry Safonov 已提交
3621
		if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
3622
			ret = enter_record(hash, rec, clear_filter);
3623 3624 3625 3626
			if (ret < 0) {
				found = ret;
				goto out_unlock;
			}
3627
			found = 1;
3628 3629
		}
	} while_for_each_ftrace_rec();
3630
 out_unlock:
3631
	mutex_unlock(&ftrace_lock);
3632 3633

	return found;
3634 3635
}

3636
static int
3637
ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
3638
{
3639
	return match_records(hash, buff, len, NULL);
3640 3641 3642
}


3643 3644 3645 3646 3647 3648
/*
 * We register the module command as a template to show others how
 * to register the a command as well.
 */

static int
3649
ftrace_mod_callback(struct ftrace_hash *hash,
3650
		    char *func, char *cmd, char *module, int enable)
3651
{
3652
	int ret;
3653 3654 3655 3656 3657 3658 3659 3660

	/*
	 * cmd == 'mod' because we only registered this func
	 * for the 'mod' ftrace_func_command.
	 * But if you register one func with multiple commands,
	 * you can tell which command was used by the cmd
	 * parameter.
	 */
3661
	ret = match_records(hash, func, strlen(func), module);
3662
	if (!ret)
3663
		return -EINVAL;
3664 3665 3666
	if (ret < 0)
		return ret;
	return 0;
3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677
}

static struct ftrace_func_command ftrace_mod_cmd = {
	.name			= "mod",
	.func			= ftrace_mod_callback,
};

static int __init ftrace_mod_cmd_init(void)
{
	return register_ftrace_command(&ftrace_mod_cmd);
}
3678
core_initcall(ftrace_mod_cmd_init);
3679

3680
static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
3681
				      struct ftrace_ops *op, struct pt_regs *pt_regs)
3682
{
S
Steven Rostedt 已提交
3683
	struct ftrace_func_probe *entry;
3684 3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695 3696 3697 3698
	struct hlist_head *hhd;
	unsigned long key;

	key = hash_long(ip, FTRACE_HASH_BITS);

	hhd = &ftrace_func_hash[key];

	if (hlist_empty(hhd))
		return;

	/*
	 * Disable preemption for these calls to prevent a RCU grace
	 * period. This syncs the hash iteration and freeing of items
	 * on the hash. rcu_read_lock is too dangerous here.
	 */
3699
	preempt_disable_notrace();
3700
	hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
3701 3702 3703
		if (entry->ip == ip)
			entry->ops->func(ip, parent_ip, &entry->data);
	}
3704
	preempt_enable_notrace();
3705 3706
}

S
Steven Rostedt 已提交
3707
static struct ftrace_ops trace_probe_ops __read_mostly =
3708
{
3709
	.func		= function_trace_probe_call,
3710
	.flags		= FTRACE_OPS_FL_INITIALIZED,
3711
	INIT_OPS_HASH(trace_probe_ops)
3712 3713
};

S
Steven Rostedt 已提交
3714
static int ftrace_probe_registered;
3715

3716
static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash)
3717
{
3718
	int ret;
3719 3720
	int i;

3721 3722 3723
	if (ftrace_probe_registered) {
		/* still need to update the function call sites */
		if (ftrace_enabled)
3724 3725
			ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
					       old_hash);
3726
		return;
3727
	}
3728 3729 3730 3731 3732 3733 3734 3735 3736 3737

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			break;
	}
	/* Nothing registered? */
	if (i == FTRACE_FUNC_HASHSIZE)
		return;

3738
	ret = ftrace_startup(&trace_probe_ops, 0);
3739

S
Steven Rostedt 已提交
3740
	ftrace_probe_registered = 1;
3741 3742
}

S
Steven Rostedt 已提交
3743
static void __disable_ftrace_function_probe(void)
3744 3745 3746
{
	int i;

S
Steven Rostedt 已提交
3747
	if (!ftrace_probe_registered)
3748 3749 3750 3751 3752 3753 3754 3755 3756
		return;

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			return;
	}

	/* no more funcs left */
3757
	ftrace_shutdown(&trace_probe_ops, 0);
3758

S
Steven Rostedt 已提交
3759
	ftrace_probe_registered = 0;
3760 3761 3762
}


3763
static void ftrace_free_entry(struct ftrace_func_probe *entry)
3764 3765
{
	if (entry->ops->free)
3766
		entry->ops->free(entry->ops, entry->ip, &entry->data);
3767 3768 3769 3770
	kfree(entry);
}

int
S
Steven Rostedt 已提交
3771
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3772 3773
			      void *data)
{
3774
	struct ftrace_ops_hash old_hash_ops;
S
Steven Rostedt 已提交
3775
	struct ftrace_func_probe *entry;
3776
	struct ftrace_glob func_g;
3777
	struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
3778
	struct ftrace_hash *old_hash = *orig_hash;
3779
	struct ftrace_hash *hash;
3780 3781
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
3782
	int not;
S
Steven Rostedt 已提交
3783
	unsigned long key;
3784
	int count = 0;
3785
	int ret;
3786

3787 3788 3789
	func_g.type = filter_parse_regex(glob, strlen(glob),
			&func_g.search, &not);
	func_g.len = strlen(func_g.search);
3790

S
Steven Rostedt 已提交
3791
	/* we do not support '!' for function probes */
3792 3793 3794
	if (WARN_ON(not))
		return -EINVAL;

3795
	mutex_lock(&trace_probe_ops.func_hash->regex_lock);
3796

3797 3798 3799 3800
	old_hash_ops.filter_hash = old_hash;
	/* Probes only have filters */
	old_hash_ops.notrace_hash = NULL;

3801
	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
3802 3803
	if (!hash) {
		count = -ENOMEM;
3804
		goto out;
3805 3806 3807 3808
	}

	if (unlikely(ftrace_disabled)) {
		count = -ENODEV;
3809
		goto out;
3810
	}
3811

3812 3813
	mutex_lock(&ftrace_lock);

3814
	do_for_each_ftrace_rec(pg, rec) {
3815

3816 3817 3818
		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

D
Dmitry Safonov 已提交
3819
		if (!ftrace_match_record(rec, &func_g, NULL, 0))
3820 3821 3822 3823
			continue;

		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
S
Steven Rostedt 已提交
3824
			/* If we did not process any, then return error */
3825 3826 3827 3828 3829 3830 3831 3832 3833 3834 3835 3836 3837 3838
			if (!count)
				count = -ENOMEM;
			goto out_unlock;
		}

		count++;

		entry->data = data;

		/*
		 * The caller might want to do something special
		 * for each function we find. We call the callback
		 * to give the caller an opportunity to do so.
		 */
3839 3840
		if (ops->init) {
			if (ops->init(ops, rec->ip, &entry->data) < 0) {
3841 3842 3843 3844 3845 3846
				/* caller does not like this func */
				kfree(entry);
				continue;
			}
		}

3847 3848 3849 3850 3851 3852 3853
		ret = enter_record(hash, rec, 0);
		if (ret < 0) {
			kfree(entry);
			count = ret;
			goto out_unlock;
		}

3854 3855 3856 3857 3858 3859 3860
		entry->ops = ops;
		entry->ip = rec->ip;

		key = hash_long(entry->ip, FTRACE_HASH_BITS);
		hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);

	} while_for_each_ftrace_rec();
3861 3862

	ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3863

3864
	__enable_ftrace_function_probe(&old_hash_ops);
3865

3866 3867 3868
	if (!ret)
		free_ftrace_hash_rcu(old_hash);
	else
3869 3870
		count = ret;

3871
 out_unlock:
3872 3873
	mutex_unlock(&ftrace_lock);
 out:
3874
	mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
3875
	free_ftrace_hash(hash);
3876 3877 3878 3879 3880

	return count;
}

enum {
S
Steven Rostedt 已提交
3881 3882
	PROBE_TEST_FUNC		= 1,
	PROBE_TEST_DATA		= 2
3883 3884 3885
};

static void
S
Steven Rostedt 已提交
3886
__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3887 3888
				  void *data, int flags)
{
3889
	struct ftrace_func_entry *rec_entry;
S
Steven Rostedt 已提交
3890
	struct ftrace_func_probe *entry;
3891
	struct ftrace_func_probe *p;
3892
	struct ftrace_glob func_g;
3893
	struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
3894
	struct ftrace_hash *old_hash = *orig_hash;
3895
	struct list_head free_list;
3896
	struct ftrace_hash *hash;
3897
	struct hlist_node *tmp;
3898
	char str[KSYM_SYMBOL_LEN];
3899
	int i, ret;
3900

3901
	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
3902
		func_g.search = NULL;
3903
	else if (glob) {
3904 3905
		int not;

3906 3907 3908 3909
		func_g.type = filter_parse_regex(glob, strlen(glob),
						 &func_g.search, &not);
		func_g.len = strlen(func_g.search);
		func_g.search = glob;
3910

S
Steven Rostedt 已提交
3911
		/* we do not support '!' for function probes */
3912 3913 3914 3915
		if (WARN_ON(not))
			return;
	}

	mutex_lock(&trace_probe_ops.func_hash->regex_lock);

	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
	if (!hash)
		/* Hmm, should report this somehow */
		goto out_unlock;

	INIT_LIST_HEAD(&free_list);

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];

		hlist_for_each_entry_safe(entry, tmp, hhd, node) {

			/* break up if statements for readability */
			if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
				continue;

			if ((flags & PROBE_TEST_DATA) && entry->data != data)
				continue;

			/* do this last, since it is the most expensive */
			if (func_g.search) {
				kallsyms_lookup(entry->ip, NULL, NULL,
						NULL, str);
				if (!ftrace_match(str, &func_g))
					continue;
			}

			rec_entry = ftrace_lookup_ip(hash, entry->ip);
			/* It is possible more than one entry had this ip */
			if (rec_entry)
				free_hash_entry(hash, rec_entry);

			hlist_del_rcu(&entry->node);
			list_add(&entry->free_list, &free_list);
		}
	}
	mutex_lock(&ftrace_lock);
	__disable_ftrace_function_probe();
	/*
	 * Remove after the disable is called. Otherwise, if the last
	 * probe is removed, a null hash means *all enabled*.
	 */
	ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
	synchronize_sched();
	if (!ret)
		free_ftrace_hash_rcu(old_hash);

	list_for_each_entry_safe(entry, p, &free_list, free_list) {
		list_del(&entry->free_list);
		ftrace_free_entry(entry);
	}
	mutex_unlock(&ftrace_lock);

 out_unlock:
	mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
	free_ftrace_hash(hash);
}

void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				void *data)
{
	__unregister_ftrace_function_probe(glob, ops, data,
					  PROBE_TEST_FUNC | PROBE_TEST_DATA);
}

void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
{
	__unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
}

void unregister_ftrace_function_probe_all(char *glob)
{
	__unregister_ftrace_function_probe(glob, NULL, NULL, 0);
}

static LIST_HEAD(ftrace_commands);
static DEFINE_MUTEX(ftrace_cmd_mutex);

/*
 * Currently we only register ftrace commands from __init, so mark this
 * __init too.
 */
__init int register_ftrace_command(struct ftrace_func_command *cmd)
{
	struct ftrace_func_command *p;
	int ret = 0;

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}
	list_add(&cmd->list, &ftrace_commands);
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}

/*
 * Currently we only unregister ftrace commands from __init, so mark
 * this __init too.
 */
__init int unregister_ftrace_command(struct ftrace_func_command *cmd)
{
	struct ftrace_func_command *p, *n;
	int ret = -ENODEV;

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry_safe(p, n, &ftrace_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = 0;
			list_del_init(&p->list);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}

static int ftrace_process_regex(struct ftrace_hash *hash,
				char *buff, int len, int enable)
{
	char *func, *command, *next = buff;
	struct ftrace_func_command *p;
	int ret = -EINVAL;

	func = strsep(&next, ":");

	if (!next) {
		ret = ftrace_match_records(hash, func, len);
		if (!ret)
			ret = -EINVAL;
		if (ret < 0)
			return ret;
		return 0;
	}

	/* command found */

	command = strsep(&next, ":");

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(p->name, command) == 0) {
			ret = p->func(hash, func, command, next, enable);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}
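
/*
 * Note on the accepted syntax (illustrative, not exhaustive): a write to
 * the filter file is either a plain glob matched against function names,
 * or "function:command[:parameter]", which is dispatched to one of the
 * ftrace_func_command handlers registered above.  For example, assuming
 * the traceoff command has been registered, a user could do:
 *
 *	echo 'schedule:traceoff' > set_ftrace_filter
 *
 * Which commands are actually available depends on what has been added
 * with register_ftrace_command().
 */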

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	struct trace_parser *parser;
	ssize_t ret, read;

	if (!cnt)
		return 0;

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	/* iter->hash is a local copy, so we don't need regex_lock */

	parser = &iter->parser;
	read = trace_get_user(parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded(parser) &&
	    !trace_parser_cont(parser)) {
		ret = ftrace_process_regex(iter->hash, parser->buffer,
					   parser->idx, enable);
		trace_parser_clear(parser);
		if (ret < 0)
			goto out;
	}

	ret = read;
 out:
	return ret;
}

ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static int
ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
{
	struct ftrace_func_entry *entry;

	if (!ftrace_location(ip))
		return -EINVAL;

	if (remove) {
		entry = ftrace_lookup_ip(hash, ip);
		if (!entry)
			return -ENOENT;
		free_hash_entry(hash, entry);
		return 0;
	}

	return add_hash_entry(hash, ip);
}

static void ftrace_ops_update_code(struct ftrace_ops *ops,
				   struct ftrace_ops_hash *old_hash)
{
	struct ftrace_ops *op;

	if (!ftrace_enabled)
		return;

	if (ops->flags & FTRACE_OPS_FL_ENABLED) {
		ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
		return;
	}

	/*
	 * If this is the shared global_ops filter, then we need to
	 * check if there is another ops that shares it, is enabled.
	 * If so, we still need to run the modify code.
	 */
	if (ops->func_hash != &global_ops.local_hash)
		return;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op->func_hash == &global_ops.local_hash &&
		    op->flags & FTRACE_OPS_FL_ENABLED) {
			ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
			/* Only need to do this once */
			return;
		}
	} while_for_each_ftrace_op(op);
}

static int
ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
		unsigned long ip, int remove, int reset, int enable)
{
	struct ftrace_hash **orig_hash;
	struct ftrace_ops_hash old_hash_ops;
	struct ftrace_hash *old_hash;
	struct ftrace_hash *hash;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ops->func_hash->regex_lock);

	if (enable)
		orig_hash = &ops->func_hash->filter_hash;
	else
		orig_hash = &ops->func_hash->notrace_hash;

	if (reset)
		hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
	else
		hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);

	if (!hash) {
		ret = -ENOMEM;
		goto out_regex_unlock;
	}

	if (buf && !ftrace_match_records(hash, buf, len)) {
		ret = -EINVAL;
		goto out_regex_unlock;
	}
	if (ip) {
		ret = ftrace_match_addr(hash, ip, remove);
		if (ret < 0)
			goto out_regex_unlock;
	}

	mutex_lock(&ftrace_lock);
	old_hash = *orig_hash;
	old_hash_ops.filter_hash = ops->func_hash->filter_hash;
	old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
	if (!ret) {
		ftrace_ops_update_code(ops, &old_hash_ops);
		free_ftrace_hash_rcu(old_hash);
	}
	mutex_unlock(&ftrace_lock);

 out_regex_unlock:
	mutex_unlock(&ops->func_hash->regex_lock);

	free_ftrace_hash(hash);
	return ret;
}

static int
ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
		int reset, int enable)
{
	return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
}

/**
 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
 * @ops - the ops to set the filter with
 * @ip - the address to add to or remove from the filter.
 * @remove - non zero to remove the ip from the filter
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled
 * If @ip is NULL, it failes to update filter.
 */
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
			 int remove, int reset)
{
	ftrace_ops_init(ops);
	return ftrace_set_addr(ops, ip, remove, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
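
/*
 * Illustrative sketch (not part of this file): a caller limiting its ops
 * to a single function by address before registering it.  "my_ops" and
 * "my_callback" are made-up names for the example.
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_callback,
 *	};
 *
 *	unsigned long ip = kallsyms_lookup_name("schedule");
 *	int ret = ftrace_set_filter_ip(&my_ops, ip, 0, 1);
 *	if (!ret)
 *		ret = register_ftrace_function(&my_ops);
 */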

/**
 * ftrace_ops_set_global_filter - setup ops to use global filters
 * @ops - the ops which will use the global filters
 *
 * ftrace users who need global function trace filtering should call this.
 * It can set the global filter only if ops were not initialized before.
 */
void ftrace_ops_set_global_filter(struct ftrace_ops *ops)
{
	if (ops->flags & FTRACE_OPS_FL_INITIALIZED)
		return;

	ftrace_ops_init(ops);
	ops->func_hash = &global_ops.local_hash;
}
EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter);

static int
ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
		 int reset, int enable)
{
	return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @ops - the ops to set the filter with
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset)
{
	ftrace_ops_init(ops);
	return ftrace_set_regex(ops, buf, len, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_filter);
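
/*
 * Illustrative sketch: the same idea with a glob instead of an address,
 * again using the hypothetical "my_ops" from the example above:
 *
 *	ftrace_set_filter(&my_ops, "vfs_*", strlen("vfs_*"), 1);
 */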

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @ops - the ops to set the notrace filter with
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
			int len, int reset)
{
	ftrace_ops_init(ops);
	return ftrace_set_regex(ops, buf, len, reset, 0);
}
EXPORT_SYMBOL_GPL(ftrace_set_notrace);
/**
 * ftrace_set_global_filter - set a function to filter on with global tracers
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(&global_ops, buf, len, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_global_filter);

/**
 * ftrace_set_global_notrace - set a function to not trace with global tracers
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(&global_ops, buf, len, reset, 0);
}
EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);

/*
 * command line interface to allow users to set filters on boot up.
 */
#define FTRACE_FILTER_SIZE		COMMAND_LINE_SIZE
static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;

/* Used by function selftest to not test if filter is set */
bool ftrace_filter_param __initdata;

static int __init set_ftrace_notrace(char *str)
{
	ftrace_filter_param = true;
	strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_notrace=", set_ftrace_notrace);

static int __init set_ftrace_filter(char *str)
{
	ftrace_filter_param = true;
	strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_filter=", set_ftrace_filter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);

static unsigned long save_global_trampoline;
static unsigned long save_global_flags;

static int __init set_graph_function(char *str)
{
	strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_graph_filter=", set_graph_function);

static int __init set_graph_notrace_function(char *str)
{
	strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_graph_notrace=", set_graph_notrace_function);

static void __init set_ftrace_early_graph(char *buf, int enable)
{
	int ret;
	char *func;
	unsigned long *table = ftrace_graph_funcs;
	int *count = &ftrace_graph_count;

	if (!enable) {
		table = ftrace_graph_notrace_funcs;
		count = &ftrace_graph_notrace_count;
	}

	while (buf) {
		func = strsep(&buf, ",");
		/* we allow only one expression at a time */
		ret = ftrace_set_func(table, count, FTRACE_GRAPH_MAX_FUNCS, func);
		if (ret)
			printk(KERN_DEBUG "ftrace: function %s not "
					  "traceable\n", func);
	}
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
{
	char *func;

	ftrace_ops_init(ops);

	while (buf) {
		func = strsep(&buf, ",");
		ftrace_set_regex(ops, func, strlen(func), 0, enable);
	}
}

static void __init set_ftrace_early_filters(void)
{
	if (ftrace_filter_buf[0])
		ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
	if (ftrace_notrace_buf[0])
		ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (ftrace_graph_buf[0])
		set_ftrace_early_graph(ftrace_graph_buf, 1);
	if (ftrace_graph_notrace_buf[0])
		set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
}

int ftrace_regex_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_ops_hash old_hash_ops;
	struct ftrace_iterator *iter;
	struct ftrace_hash **orig_hash;
	struct ftrace_hash *old_hash;
	struct trace_parser *parser;
	int filter_hash;
	int ret;

	if (file->f_mode & FMODE_READ) {
		iter = m->private;
		seq_release(inode, file);
	} else
		iter = file->private_data;

	parser = &iter->parser;
	if (trace_parser_loaded(parser)) {
		parser->buffer[parser->idx] = 0;
		ftrace_match_records(iter->hash, parser->buffer, parser->idx);
	}

	trace_parser_put(parser);

	mutex_lock(&iter->ops->func_hash->regex_lock);

	if (file->f_mode & FMODE_WRITE) {
		filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);

		if (filter_hash)
			orig_hash = &iter->ops->func_hash->filter_hash;
		else
			orig_hash = &iter->ops->func_hash->notrace_hash;

		mutex_lock(&ftrace_lock);
		old_hash = *orig_hash;
		old_hash_ops.filter_hash = iter->ops->func_hash->filter_hash;
		old_hash_ops.notrace_hash = iter->ops->func_hash->notrace_hash;
		ret = ftrace_hash_move(iter->ops, filter_hash,
				       orig_hash, iter->hash);
		if (!ret) {
			ftrace_ops_update_code(iter->ops, &old_hash_ops);
			free_ftrace_hash_rcu(old_hash);
		}
		mutex_unlock(&ftrace_lock);
	}

	mutex_unlock(&iter->ops->func_hash->regex_lock);
	free_ftrace_hash(iter->hash);
	kfree(iter);

	return 0;
}

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

static const struct file_operations ftrace_enabled_fops = {
	.open = ftrace_enabled_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

static const struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

static const struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = seq_read,
	.write = ftrace_notrace_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static DEFINE_MUTEX(graph_lock);

int ftrace_graph_count;
int ftrace_graph_notrace_count;
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;

struct ftrace_graph_data {
	unsigned long *table;
	size_t size;
	int *count;
	const struct seq_operations *seq_ops;
};

static void *
__g_next(struct seq_file *m, loff_t *pos)
{
	struct ftrace_graph_data *fgd = m->private;

	if (*pos >= *fgd->count)
		return NULL;
	return &fgd->table[*pos];
}

static void *
g_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __g_next(m, pos);
}

static void *g_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_graph_data *fgd = m->private;

	mutex_lock(&graph_lock);

	/* Nothing, tell g_show to print all functions are enabled */
	if (!*fgd->count && !*pos)
		return (void *)1;

	return __g_next(m, pos);
}

static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}

static int g_show(struct seq_file *m, void *v)
{
	unsigned long *ptr = v;

	if (!ptr)
		return 0;

	if (ptr == (unsigned long *)1) {
		struct ftrace_graph_data *fgd = m->private;

		if (fgd->table == ftrace_graph_funcs)
			seq_puts(m, "#### all functions enabled ####\n");
		else
			seq_puts(m, "#### no functions disabled ####\n");
		return 0;
	}

	seq_printf(m, "%ps\n", (void *)*ptr);

	return 0;
}

static const struct seq_operations ftrace_graph_seq_ops = {
	.start = g_start,
	.next = g_next,
	.stop = g_stop,
	.show = g_show,
};

static int
__ftrace_graph_open(struct inode *inode, struct file *file,
		    struct ftrace_graph_data *fgd)
{
	int ret = 0;

	mutex_lock(&graph_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC)) {
		*fgd->count = 0;
		memset(fgd->table, 0, fgd->size * sizeof(*fgd->table));
	}
	mutex_unlock(&graph_lock);

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, fgd->seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = fgd;
		}
	} else
		file->private_data = fgd;

	return ret;
}

static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
	struct ftrace_graph_data *fgd;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
	if (fgd == NULL)
		return -ENOMEM;

	fgd->table = ftrace_graph_funcs;
	fgd->size = FTRACE_GRAPH_MAX_FUNCS;
	fgd->count = &ftrace_graph_count;
	fgd->seq_ops = &ftrace_graph_seq_ops;

	return __ftrace_graph_open(inode, file, fgd);
}

static int
ftrace_graph_notrace_open(struct inode *inode, struct file *file)
{
	struct ftrace_graph_data *fgd;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
	if (fgd == NULL)
		return -ENOMEM;

	fgd->table = ftrace_graph_notrace_funcs;
	fgd->size = FTRACE_GRAPH_MAX_FUNCS;
	fgd->count = &ftrace_graph_notrace_count;
	fgd->seq_ops = &ftrace_graph_seq_ops;

	return __ftrace_graph_open(inode, file, fgd);
}

static int
ftrace_graph_release(struct inode *inode, struct file *file)
{
	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;

		kfree(m->private);
		seq_release(inode, file);
	} else {
		kfree(file->private_data);
	}

	return 0;
}

static int
ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
{
	struct ftrace_glob func_g;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int fail = 1;
	int not;
	bool exists;
	int i;

	/* decode regex */
	func_g.type = filter_parse_regex(buffer, strlen(buffer),
					 &func_g.search, &not);
	if (!not && *idx >= size)
		return -EBUSY;

	func_g.len = strlen(func_g.search);

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled)) {
		mutex_unlock(&ftrace_lock);
		return -ENODEV;
	}

	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		if (ftrace_match_record(rec, &func_g, NULL, 0)) {
			/* if it is in the array */
			exists = false;
			for (i = 0; i < *idx; i++) {
				if (array[i] == rec->ip) {
					exists = true;
					break;
				}
			}

			if (!not) {
				fail = 0;
				if (!exists) {
					array[(*idx)++] = rec->ip;
					if (*idx >= size)
						goto out;
				}
			} else {
				if (exists) {
					array[i] = array[--(*idx)];
					array[*idx] = 0;
					fail = 0;
				}
			}
		}
	} while_for_each_ftrace_rec();
out:
	mutex_unlock(&ftrace_lock);

	if (fail)
		return -EINVAL;

	return 0;
}

static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read, ret = 0;
	struct ftrace_graph_data *fgd = file->private_data;

	if (!cnt)
		return 0;

	if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		parser.buffer[parser.idx] = 0;

		mutex_lock(&graph_lock);

		/* we allow only one expression at a time */
		ret = ftrace_set_func(fgd->table, fgd->count, fgd->size,
				      parser.buffer);

		mutex_unlock(&graph_lock);
	}

	if (!ret)
		ret = read;

	trace_parser_put(&parser);

	return ret;
}

static const struct file_operations ftrace_graph_fops = {
	.open		= ftrace_graph_open,
	.read		= seq_read,
	.write		= ftrace_graph_write,
	.llseek		= tracing_lseek,
	.release	= ftrace_graph_release,
};

static const struct file_operations ftrace_graph_notrace_fops = {
	.open		= ftrace_graph_notrace_open,
	.read		= seq_read,
	.write		= ftrace_graph_write,
	.llseek		= tracing_lseek,
	.release	= ftrace_graph_release,
};
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

void ftrace_create_filter_files(struct ftrace_ops *ops,
				struct dentry *parent)
{

	trace_create_file("set_ftrace_filter", 0644, parent,
			  ops, &ftrace_filter_fops);

	trace_create_file("set_ftrace_notrace", 0644, parent,
			  ops, &ftrace_notrace_fops);
}

/*
 * The name "destroy_filter_files" is really a misnomer. Although
 * in the future it may actually delete the files, this is
 * really intended to make sure the ops passed in are disabled
 * and that when this function returns, the caller is free to
 * free the ops.
 *
 * The "destroy" name is only to match the "create" name that this
 * should be paired with.
 */
void ftrace_destroy_filter_files(struct ftrace_ops *ops)
{
	mutex_lock(&ftrace_lock);
	if (ops->flags & FTRACE_OPS_FL_ENABLED)
		ftrace_shutdown(ops, 0);
	ops->flags |= FTRACE_OPS_FL_DELETED;
	mutex_unlock(&ftrace_lock);
}

static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
{

	trace_create_file("available_filter_functions", 0444,
			d_tracer, NULL, &ftrace_avail_fops);

	trace_create_file("enabled_functions", 0444,
			d_tracer, NULL, &ftrace_enabled_fops);

	ftrace_create_filter_files(&global_ops, d_tracer);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	trace_create_file("set_graph_function", 0444, d_tracer,
				    NULL,
				    &ftrace_graph_fops);
	trace_create_file("set_graph_notrace", 0444, d_tracer,
				    NULL,
				    &ftrace_graph_notrace_fops);
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	return 0;
}

static int ftrace_cmp_ips(const void *a, const void *b)
{
	const unsigned long *ipa = a;
	const unsigned long *ipb = b;

	if (*ipa > *ipb)
		return 1;
	if (*ipa < *ipb)
		return -1;
	return 0;
}

static int ftrace_process_locs(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	struct ftrace_page *start_pg;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long count;
	unsigned long *p;
	unsigned long addr;
	unsigned long flags = 0; /* Shut up gcc */
	int ret = -ENOMEM;

	count = end - start;

	if (!count)
		return 0;

	sort(start, count, sizeof(*start),
	     ftrace_cmp_ips, NULL);

	start_pg = ftrace_allocate_pages(count);
	if (!start_pg)
		return -ENOMEM;

	mutex_lock(&ftrace_lock);

	/*
	 * Core and each module needs their own pages, as
	 * modules will free them when they are removed.
	 * Force a new page to be allocated for modules.
	 */
	if (!mod) {
		WARN_ON(ftrace_pages || ftrace_pages_start);
		/* First initialization */
		ftrace_pages = ftrace_pages_start = start_pg;
	} else {
		if (!ftrace_pages)
			goto out;

		if (WARN_ON(ftrace_pages->next)) {
			/* Hmm, we have free pages? */
			while (ftrace_pages->next)
				ftrace_pages = ftrace_pages->next;
		}

		ftrace_pages->next = start_pg;
	}

	p = start;
	pg = start_pg;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;

		if (pg->index == pg->size) {
			/* We should have allocated enough */
			if (WARN_ON(!pg->next))
				break;
			pg = pg->next;
		}

		rec = &pg->records[pg->index++];
		rec->ip = addr;
4955 4956
	}

	/* We should have used all pages */
	WARN_ON(pg->next);

	/* Assign the last page to ftrace_pages */
	ftrace_pages = pg;

	/*
	 * We only need to disable interrupts on start up
	 * because we are modifying code that an interrupt
	 * may execute, and the modification is not atomic.
	 * But for modules, nothing runs the code we modify
	 * until we are finished with it, and there's no
	 * reason to cause large interrupt latencies while we do it.
	 */
	if (!mod)
		local_irq_save(flags);
	ftrace_update_code(mod, start_pg);
	if (!mod)
		local_irq_restore(flags);
	ret = 0;
 out:
	mutex_unlock(&ftrace_lock);

	return ret;
}

#ifdef CONFIG_MODULES

#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)

static int referenced_filters(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	int cnt = 0;

	for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
		if (ops_references_rec(ops, rec))
		    cnt++;
	}

	return cnt;
}

void ftrace_release_mod(struct module *mod)
{
	struct dyn_ftrace *rec;
	struct ftrace_page **last_pg;
	struct ftrace_page *pg;
	int order;

	mutex_lock(&ftrace_lock);

	if (ftrace_disabled)
		goto out_unlock;

	/*
	 * Each module has its own ftrace_pages, remove
	 * them from the list.
	 */
	last_pg = &ftrace_pages_start;
	for (pg = ftrace_pages_start; pg; pg = *last_pg) {
		rec = &pg->records[0];
		if (within_module_core(rec->ip, mod)) {
			/*
			 * As core pages are first, the first
			 * page should never be a module page.
			 */
			if (WARN_ON(pg == ftrace_pages_start))
				goto out_unlock;

			/* Check if we are deleting the last page */
			if (pg == ftrace_pages)
				ftrace_pages = next_to_ftrace_page(last_pg);

			*last_pg = pg->next;
			order = get_count_order(pg->size / ENTRIES_PER_PAGE);
			free_pages((unsigned long)pg->records, order);
			kfree(pg);
		} else
			last_pg = &pg->next;
	}
 out_unlock:
	mutex_unlock(&ftrace_lock);
}

void ftrace_module_enable(struct module *mod)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	mutex_lock(&ftrace_lock);

	if (ftrace_disabled)
		goto out_unlock;

	/*
	 * If the tracing is enabled, go ahead and enable the record.
	 *
	 * The reason not to enable the record immediately is the
	 * inherent check of ftrace_make_nop/ftrace_make_call for
	 * correct previous instructions.  Making first the NOP
	 * conversion puts the module to the correct state, thus
	 * passing the ftrace_make_call check.
	 *
	 * We also delay this to after the module code already set the
	 * text to read-only, as we now need to set it back to read-write
	 * so that we can modify the text.
	 */
	if (ftrace_start_up)
		ftrace_arch_code_modify_prepare();

	do_for_each_ftrace_rec(pg, rec) {
		int cnt;
		/*
		 * do_for_each_ftrace_rec() is a double loop.
		 * module text shares the pg. If a record is
		 * not part of this module, then skip this pg,
		 * which the "break" will do.
		 */
		if (!within_module_core(rec->ip, mod))
			break;

		cnt = 0;

		/*
		 * When adding a module, we need to check if tracers are
		 * currently enabled and if they are, and can trace this record,
		 * we need to enable the module functions as well as update the
		 * reference counts for those function records.
		 */
		if (ftrace_start_up)
			cnt += referenced_filters(rec);

		/* This clears FTRACE_FL_DISABLED */
		rec->flags = cnt;

		if (ftrace_start_up && cnt) {
			int failed = __ftrace_replace_code(rec, 1);
			if (failed) {
				ftrace_bug(failed, rec);
				goto out_loop;
			}
		}

	} while_for_each_ftrace_rec();

 out_loop:
	if (ftrace_start_up)
		ftrace_arch_code_modify_post_process();

 out_unlock:
	mutex_unlock(&ftrace_lock);
}

void ftrace_module_init(struct module *mod)
{
	if (ftrace_disabled || !mod->num_ftrace_callsites)
		return;

	ftrace_process_locs(mod, mod->ftrace_callsites,
			    mod->ftrace_callsites + mod->num_ftrace_callsites);
}
#endif /* CONFIG_MODULES */

void __init ftrace_init(void)
{
	extern unsigned long __start_mcount_loc[];
	extern unsigned long __stop_mcount_loc[];
	unsigned long count, flags;
	int ret;

	local_irq_save(flags);
	ret = ftrace_dyn_arch_init();
	local_irq_restore(flags);
	if (ret)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;
	if (!count) {
		pr_info("ftrace: No functions to be traced?\n");
		goto failed;
	}

	pr_info("ftrace: allocating %ld entries in %ld pages\n",
		count, count / ENTRIES_PER_PAGE + 1);

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_process_locs(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	set_ftrace_early_filters();

	return;
 failed:
	ftrace_disabled = 1;
}

/* Do nothing if arch does not support this */
void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
{
}

static void ftrace_update_trampoline(struct ftrace_ops *ops)
{

/*
 * Currently there's no safe way to free a trampoline when the kernel
 * is configured with PREEMPT. That is because a task could be preempted
 * when it jumped to the trampoline, it may be preempted for a long time
 * depending on the system load, and currently there's no way to know
 * when it will be off the trampoline. If the trampoline is freed
 * too early, when the task runs again, it will be executing on freed
 * memory and crash.
 */
#ifdef CONFIG_PREEMPT
	/* Currently, only non dynamic ops can have a trampoline */
	if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
		return;
#endif

	arch_ftrace_update_trampoline(ops);
}

#else

static struct ftrace_ops global_ops = {
	.func			= ftrace_stub,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
				  FTRACE_OPS_FL_INITIALIZED |
				  FTRACE_OPS_FL_PID,
};

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
core_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
static inline void ftrace_startup_all(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(ops, command)					\
	({								\
		int ___ret = __register_ftrace_function(ops);		\
		if (!___ret)						\
			(ops)->flags |= FTRACE_OPS_FL_ENABLED;		\
		___ret;							\
	})
# define ftrace_shutdown(ops, command)					\
	({								\
		int ___ret = __unregister_ftrace_function(ops);		\
		if (!___ret)						\
			(ops)->flags &= ~FTRACE_OPS_FL_ENABLED;		\
		___ret;							\
	})

# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)

static inline int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
5222 5223 5224 5225
{
	return 1;
}

5226 5227 5228 5229
static void ftrace_update_trampoline(struct ftrace_ops *ops)
{
}

5230 5231
#endif /* CONFIG_DYNAMIC_FTRACE */

5232 5233 5234 5235 5236 5237 5238 5239 5240 5241 5242 5243 5244 5245 5246 5247 5248 5249 5250 5251 5252 5253 5254
__init void ftrace_init_global_array_ops(struct trace_array *tr)
{
	tr->ops = &global_ops;
	tr->ops->private = tr;
}

void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
{
	/* If we filter on pids, update to use the pid function */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
		if (WARN_ON(tr->ops->func != ftrace_stub))
			printk("ftrace ops had %pS for function\n",
			       tr->ops->func);
	}
	tr->ops->func = func;
	tr->ops->private = tr;
}

void ftrace_reset_array_ops(struct trace_array *tr)
{
	tr->ops->func = ftrace_stub;
}

5255 5256
static inline void
__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
5257
		       struct ftrace_ops *ignored, struct pt_regs *regs)
5258
{
5259
	struct ftrace_ops *op;
5260
	int bit;
5261

5262 5263 5264
	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
	if (bit < 0)
		return;
5265

5266 5267 5268 5269 5270
	/*
	 * Some of the ops may be dynamically allocated,
	 * they must be freed after a synchronize_sched().
	 */
	preempt_disable_notrace();
5271

5272
	do_for_each_ftrace_op(op, ftrace_ops_list) {
5273 5274 5275 5276 5277 5278 5279 5280 5281 5282 5283 5284 5285 5286
		/*
		 * Check the following for each ops before calling their func:
		 *  if RCU flag is set, then rcu_is_watching() must be true
		 *  if PER_CPU is set, then ftrace_function_local_disable()
		 *                          must be false
		 *  Otherwise test if the ip matches the ops filter
		 *
		 * If any of the above fails then the op->func() is not executed.
		 */
		if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) &&
		    (!(op->flags & FTRACE_OPS_FL_PER_CPU) ||
		     !ftrace_function_local_disabled(op)) &&
		    ftrace_ops_test(op, ip, regs)) {
		    
5287 5288
			if (FTRACE_WARN_ON(!op->func)) {
				pr_warn("op=%p %pS\n", op, op);
5289 5290
				goto out;
			}
5291
			op->func(ip, parent_ip, op, regs);
5292
		}
5293
	} while_for_each_ftrace_op(op);
5294
out:
5295
	preempt_enable_notrace();
5296
	trace_clear_recursion(bit);
5297 5298
}

5299 5300 5301 5302 5303
/*
 * Some archs only support passing ip and parent_ip. Even though
 * the list function ignores the op parameter, we do not want any
 * C side effects, where a function is called without the caller
 * sending a third parameter.
5304 5305 5306
 * Archs are to support both the regs and ftrace_ops at the same time.
 * If they support ftrace_ops, it is assumed they support regs.
 * If call backs want to use regs, they must either check for regs
5307 5308
 * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
5309
 * An architecture can pass partial regs with ftrace_ops and still
L
Li Bin 已提交
5310
 * set the ARCH_SUPPORTS_FTRACE_OPS.
5311 5312 5313
 */
#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
5314
				 struct ftrace_ops *op, struct pt_regs *regs)
5315
{
5316
	__ftrace_ops_list_func(ip, parent_ip, NULL, regs);
5317 5318 5319 5320
}
#else
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
{
5321
	__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
5322 5323 5324
}
#endif

5325 5326
/*
 * If there's only one function registered but it does not support
5327 5328
 * recursion, needs RCU protection and/or requires per cpu handling, then
 * this function will be called by the mcount trampoline.
5329
 */
5330
static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
5331 5332 5333 5334
				   struct ftrace_ops *op, struct pt_regs *regs)
{
	int bit;

5335 5336 5337
	if ((op->flags & FTRACE_OPS_FL_RCU) && !rcu_is_watching())
		return;

5338 5339 5340 5341
	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
	if (bit < 0)
		return;

5342
	preempt_disable_notrace();
5343

5344 5345 5346 5347 5348 5349
	if (!(op->flags & FTRACE_OPS_FL_PER_CPU) ||
	    !ftrace_function_local_disabled(op)) {
		op->func(ip, parent_ip, op, regs);
	}

	preempt_enable_notrace();
5350 5351 5352
	trace_clear_recursion(bit);
}

5353 5354 5355 5356 5357 5358 5359 5360 5361 5362 5363 5364 5365 5366
/**
 * ftrace_ops_get_func - get the function a trampoline should call
 * @ops: the ops to get the function for
 *
 * Normally the mcount trampoline will call the ops->func, but there
 * are times that it should not. For example, if the ops does not
 * have its own recursion protection, then it should call the
 * ftrace_ops_recurs_func() instead.
 *
 * Returns the function that the trampoline should call for @ops.
 */
ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
{
	/*
5367 5368
	 * If the function does not handle recursion, needs to be RCU safe,
	 * or does per cpu logic, then we need to call the assist handler.
5369
	 */
5370 5371 5372
	if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE) ||
	    ops->flags & (FTRACE_OPS_FL_RCU | FTRACE_OPS_FL_PER_CPU))
		return ftrace_ops_assist_func;
5373 5374 5375 5376

	return ops->func;
}

5377 5378 5379
static void
ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
		    struct task_struct *prev, struct task_struct *next)
S
Steven Rostedt 已提交
5380
{
5381 5382
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;
S
Steven Rostedt 已提交
5383

5384
	pid_list = rcu_dereference_sched(tr->function_pids);
5385

5386 5387
	this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
		       trace_ignore_this_task(pid_list, next));
S
Steven Rostedt 已提交
5388 5389
}

5390
static void clear_ftrace_pids(struct trace_array *tr)
5391
{
5392 5393
	struct trace_pid_list *pid_list;
	int cpu;
5394

5395 5396 5397 5398
	pid_list = rcu_dereference_protected(tr->function_pids,
					     lockdep_is_held(&ftrace_lock));
	if (!pid_list)
		return;
5399

5400
	unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
5401

5402 5403
	for_each_possible_cpu(cpu)
		per_cpu_ptr(tr->trace_buffer.data, cpu)->ftrace_ignore_pid = false;
S
Steven Rostedt 已提交
5404

5405
	rcu_assign_pointer(tr->function_pids, NULL);
S
Steven Rostedt 已提交
5406

5407 5408
	/* Wait till all users are no longer using pid filtering */
	synchronize_sched();
5409

5410
	trace_free_pid_list(pid_list);
5411 5412
}

5413
static void ftrace_pid_reset(struct trace_array *tr)
5414
{
5415
	mutex_lock(&ftrace_lock);
5416
	clear_ftrace_pids(tr);
S
Steven Rostedt 已提交
5417

5418
	ftrace_update_pid_func();
5419
	ftrace_startup_all(0);
5420 5421 5422 5423

	mutex_unlock(&ftrace_lock);
}

5424 5425
/* Greater than any max PID */
#define FTRACE_NO_PIDS		(void *)(PID_MAX_LIMIT + 1)
5426

5427
static void *fpid_start(struct seq_file *m, loff_t *pos)
5428
	__acquires(RCU)
5429
{
5430 5431 5432
	struct trace_pid_list *pid_list;
	struct trace_array *tr = m->private;

5433
	mutex_lock(&ftrace_lock);
5434 5435 5436
	rcu_read_lock_sched();

	pid_list = rcu_dereference_sched(tr->function_pids);
5437

5438 5439
	if (!pid_list)
		return !(*pos) ? FTRACE_NO_PIDS : NULL;
5440

5441
	return trace_pid_start(pid_list, pos);
5442 5443 5444 5445
}

static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
{
5446 5447 5448 5449
	struct trace_array *tr = m->private;
	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);

	if (v == FTRACE_NO_PIDS)
5450 5451
		return NULL;

5452
	return trace_pid_next(pid_list, v, pos);
5453 5454 5455
}

static void fpid_stop(struct seq_file *m, void *p)
5456
	__releases(RCU)
5457
{
5458
	rcu_read_unlock_sched();
5459 5460 5461 5462 5463
	mutex_unlock(&ftrace_lock);
}

static int fpid_show(struct seq_file *m, void *v)
{
5464
	if (v == FTRACE_NO_PIDS) {
5465
		seq_puts(m, "no pid\n");
5466 5467 5468
		return 0;
	}

5469
	return trace_pid_show(m, v);
5470 5471 5472 5473 5474 5475 5476 5477 5478 5479 5480 5481
}

static const struct seq_operations ftrace_pid_sops = {
	.start = fpid_start,
	.next = fpid_next,
	.stop = fpid_stop,
	.show = fpid_show,
};

static int
ftrace_pid_open(struct inode *inode, struct file *file)
{
5482 5483
	struct trace_array *tr = inode->i_private;
	struct seq_file *m;
5484 5485
	int ret = 0;

5486 5487 5488
	if (trace_array_get(tr) < 0)
		return -ENODEV;

5489 5490
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
5491
		ftrace_pid_reset(tr);
5492

	ret = seq_open(file, &ftrace_pid_sops);
	if (ret < 0) {
		trace_array_put(tr);
	} else {
		m = file->private_data;
		/* copy tr over to seq ops */
		m->private = tr;
	}

	return ret;
}

static void ignore_task_cpu(void *data)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	/*
	 * This function is called by on_each_cpu() while the
	 * event_mutex is held.
	 */
	pid_list = rcu_dereference_protected(tr->function_pids,
					     mutex_is_locked(&ftrace_lock));

	this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
		       trace_ignore_this_task(pid_list, current));
}

static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	struct trace_pid_list *filtered_pids = NULL;
	struct trace_pid_list *pid_list;
	ssize_t ret;
5530

	if (!cnt)
		return 0;

	mutex_lock(&ftrace_lock);

	filtered_pids = rcu_dereference_protected(tr->function_pids,
					     lockdep_is_held(&ftrace_lock));

	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
	if (ret < 0)
		goto out;
5542

5543
	rcu_assign_pointer(tr->function_pids, pid_list);
5544

	if (filtered_pids) {
		synchronize_sched();
		trace_free_pid_list(filtered_pids);
	} else if (pid_list) {
		/* Register a probe to set whether to ignore the tracing of a task */
		register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
	}
5552

5553
	/*
	 * Ignoring of pids is done at task switch. But we have to
	 * check for those tasks that are currently running.
	 * Always do this in case a pid was appended or removed.
5557
	 */
5558
	on_each_cpu(ignore_task_cpu, tr, 1);
5559

	ftrace_update_pid_func();
	ftrace_startup_all(0);
 out:
	mutex_unlock(&ftrace_lock);
5564

5565 5566
	if (ret > 0)
		*ppos += ret;
5567

5568
	return ret;
5569
}
5570

static int
ftrace_pid_release(struct inode *inode, struct file *file)
{
5574
	struct trace_array *tr = inode->i_private;
5575

	trace_array_put(tr);

	return seq_release(inode, file);
5579 5580
}

5581
static const struct file_operations ftrace_pid_fops = {
	.open		= ftrace_pid_open,
	.write		= ftrace_pid_write,
	.read		= seq_read,
5585
	.llseek		= tracing_lseek,
5586
	.release	= ftrace_pid_release,
5587 5588
};

5589
void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
5590
{
5591
	trace_create_file("set_ftrace_pid", 0644, d_tracer,
5592
			    tr, &ftrace_pid_fops);
5593 5594
}

void __init ftrace_init_tracefs_toplevel(struct trace_array *tr,
					 struct dentry *d_tracer)
{
	/* Only the top level directory has the dyn_tracefs and profile */
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));

	ftrace_init_dyn_tracefs(d_tracer);
	ftrace_profile_tracefs(d_tracer);
}

S
Steven Rostedt 已提交
5605
/**
5606
 * ftrace_kill - kill ftrace
S
Steven Rostedt 已提交
5607 5608 5609 5610 5611
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
 */
5612
void ftrace_kill(void)
S
Steven Rostedt 已提交
5613 5614 5615 5616 5617 5618
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}

/**
 * ftrace_is_dead - Test if ftrace is dead or not.
 */
int ftrace_is_dead(void)
{
	return ftrace_disabled;
}

5627
/**
5628 5629
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
5630
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
5637
 */
5638
int register_ftrace_function(struct ftrace_ops *ops)
5639
{
5640
	int ret = -1;
5641

5642 5643
	ftrace_ops_init(ops);

S
Steven Rostedt 已提交
5644
	mutex_lock(&ftrace_lock);
5645

5646
	ret = ftrace_startup(ops, 0);
5647

S
Steven Rostedt 已提交
5648
	mutex_unlock(&ftrace_lock);
5649

5650
	return ret;
5651
}
5652
EXPORT_SYMBOL_GPL(register_ftrace_function);
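
/*
 * Illustrative sketch of a minimal user of the interface above.  The
 * callback and ops names are hypothetical; as noted in the kernel-doc,
 * the callback itself must be notrace (or the ops marked recursion safe).
 *
 *	static void notrace my_callback(unsigned long ip,
 *					unsigned long parent_ip,
 *					struct ftrace_ops *op,
 *					struct pt_regs *regs)
 *	{
 *		(do something cheap and non-recursive here)
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_callback,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */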

/**
5655
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

S
Steven Rostedt 已提交
5664
	mutex_lock(&ftrace_lock);
5665
	ret = ftrace_shutdown(ops, 0);
S
Steven Rostedt 已提交
5666
	mutex_unlock(&ftrace_lock);
5667 5668 5669

	return ret;
}
5670
EXPORT_SYMBOL_GPL(unregister_ftrace_function);
5671

I
Ingo Molnar 已提交
5672
int
5673
ftrace_enable_sysctl(struct ctl_table *table, int write,
5674
		     void __user *buffer, size_t *lenp,
5675 5676
		     loff_t *ppos)
{
5677
	int ret = -ENODEV;
5678

S
Steven Rostedt 已提交
5679
	mutex_lock(&ftrace_lock);
5680

	if (unlikely(ftrace_disabled))
		goto out;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
5685

5686
	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
5687 5688
		goto out;

5689
	last_ftrace_enabled = !!ftrace_enabled;

	if (ftrace_enabled) {

		/* we are starting ftrace again */
5694 5695
		if (ftrace_ops_list != &ftrace_list_end)
			update_ftrace_function();
5696

5697 5698
		ftrace_startup_sysctl();

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
S
Steven Rostedt 已提交
5707
	mutex_unlock(&ftrace_lock);
5708
	return ret;
5709
}
5710

5711
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5712

static struct ftrace_ops graph_ops = {
	.func			= ftrace_stub,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
				   FTRACE_OPS_FL_INITIALIZED |
5717
				   FTRACE_OPS_FL_PID |
				   FTRACE_OPS_FL_STUB,
#ifdef FTRACE_GRAPH_TRAMP_ADDR
	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
5721
	/* trampoline_size is only needed for dynamically allocated tramps */
#endif
	ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
};

void ftrace_graph_sleep_time_control(bool enable)
{
	fgraph_sleep_time = enable;
}

void ftrace_graph_graph_time_control(bool enable)
{
	fgraph_graph_time = enable;
}

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
5744
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
5745
static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;

/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

5767
	read_lock(&tasklist_lock);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
5775
			atomic_set(&t->tracing_graph_pause, 0);
5776
			atomic_set(&t->trace_overrun, 0);
			t->curr_ret_stack = -1;
			/* Make sure the tasks see the -1 first: */
			smp_wmb();
			t->ret_stack = ret_stack_list[start++];
		}
	} while_each_thread(g, t);

unlock:
5785
	read_unlock(&tasklist_lock);
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}

5792
static void
5793
ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
5794
			struct task_struct *prev, struct task_struct *next)
{
	unsigned long long timestamp;
	int index;

5799 5800 5801 5802
	/*
	 * Does the user want to count the time a function was asleep.
	 * If so, do not update the time stamps.
	 */
5803
	if (fgraph_sleep_time)
5804 5805
		return;

5806 5807 5808 5809 5810 5811 5812 5813 5814 5815 5816 5817 5818 5819 5820 5821 5822 5823
	timestamp = trace_clock_local();

	prev->ftrace_timestamp = timestamp;

	/* only process tasks that we timestamped */
	if (!next->ftrace_timestamp)
		return;

	/*
	 * Update all the counters in next to make up for the
	 * time next was sleeping.
	 */
	timestamp -= next->ftrace_timestamp;

	for (index = next->curr_ret_stack; index >= 0; index--)
		next->ret_stack[index].calltime += timestamp;
}

5824
/* Allocate a return stack for each task */
5825
static int start_graph_tracing(void)
5826 5827
{
	struct ftrace_ret_stack **ret_stack_list;
5828
	int ret, cpu;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				sizeof(struct ftrace_ret_stack *),
				GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

5837
	/* The cpu_boot init_task->ret_stack will never be freed */
5838 5839
	for_each_online_cpu(cpu) {
		if (!idle_task(cpu)->ret_stack)
5840
			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
5841
	}
5842

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

5847
	if (!ret) {
5848
		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
		if (ret)
			pr_info("ftrace_graph: Couldn't activate tracepoint"
				" probe to kernel_sched_switch\n");
	}

	kfree(ret_stack_list);
	return ret;
}

/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
							void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}
	return NOTIFY_DONE;
}
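
/*
 * Note: pause_graph_tracing()/unpause_graph_tracing() adjust the
 * per-task pause count on current, so this suppresses new return-stack
 * entries for the task driving the hibernation transition while its
 * state is being saved or restored.
 */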

static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
{
	if (!ftrace_ops_test(&global_ops, trace->func, NULL))
		return 0;
	return __ftrace_graph_entry(trace);
}

/*
 * The function graph tracer should only trace the functions defined
 * by set_ftrace_filter and set_ftrace_notrace. If another function
 * tracer ops is registered, the graph tracer needs to test each
 * function against the global ops, rather than trace any function
 * that any ftrace_ops has registered.
 */
static void update_function_graph_func(void)
{
	struct ftrace_ops *op;
	bool do_test = false;

	/*
	 * The graph and global ops share the same set of functions
	 * to test. If any other ops is on the list, then
	 * the graph tracing needs to test if it's the function
	 * it should call.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op != &global_ops && op != &graph_ops &&
		    op != &ftrace_list_end) {
			do_test = true;
			/* in double loop, break out with goto */
			goto out;
		}
	} while_for_each_ftrace_op(op);
 out:
	if (do_test)
		ftrace_graph_entry = ftrace_graph_entry_test;
	else
		ftrace_graph_entry = __ftrace_graph_entry;
}

static struct notifier_block ftrace_suspend_notifier = {
	.notifier_call = ftrace_suspend_notifier_call,
};

int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_lock);

	/* we currently allow only one tracer registered at a time */
	if (ftrace_graph_active) {
		ret = -EBUSY;
		goto out;
	}

	register_pm_notifier(&ftrace_suspend_notifier);

	ftrace_graph_active++;
	ret = start_graph_tracing();
	if (ret) {
		ftrace_graph_active--;
		goto out;
	}

	ftrace_graph_return = retfunc;

	/*
	 * Update the indirect function to the entryfunc, and the
	 * function that gets called to the entry_test first. Then
	 * call the update fgraph entry function to determine if
	 * the entryfunc should be called directly or not.
	 */
	__ftrace_graph_entry = entryfunc;
	ftrace_graph_entry = ftrace_graph_entry_test;
	update_function_graph_func();

	ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
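
/*
 * Illustrative usage sketch (the callback names below are hypothetical,
 * not part of this file): a tracer registers one return and one entry
 * callback in a single call, and only one such pair may be active at a
 * time (-EBUSY otherwise, as enforced above).
 *
 *	static int my_graph_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	   nonzero means "trace this call"
 *	}
 *
 *	static void my_graph_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	ret = register_ftrace_graph(my_graph_return, my_graph_entry);
 *	...
 *	unregister_ftrace_graph();
 */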

void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_lock);

	if (unlikely(!ftrace_graph_active))
		goto out;

	ftrace_graph_active--;
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	__ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);
	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);

#ifdef CONFIG_DYNAMIC_FTRACE
	/*
	 * Function graph does not allocate the trampoline, but
	 * other global_ops do. We need to reset the ALLOC_TRAMP flag
	 * if one was used.
	 */
	global_ops.trampoline = save_global_trampoline;
	if (save_global_flags & FTRACE_OPS_FL_ALLOC_TRAMP)
		global_ops.flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
#endif

 out:
	mutex_unlock(&ftrace_lock);
}

static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);

static void
graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
{
	atomic_set(&t->tracing_graph_pause, 0);
	atomic_set(&t->trace_overrun, 0);
	t->ftrace_timestamp = 0;
	/* make curr_ret_stack visible before we add the ret_stack */
	smp_wmb();
	t->ret_stack = ret_stack;
}

/*
 * Allocate a return stack for the idle task. May be the first
 * time through, or it may be done by CPU hotplug online.
 */
void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
{
	t->curr_ret_stack = -1;
	/*
	 * The idle task has no parent; it either has its own
	 * stack or no stack at all.
	 */
	if (t->ret_stack)
		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = per_cpu(idle_ret_stack, cpu);
		if (!ret_stack) {
			ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
					    * sizeof(struct ftrace_ret_stack),
					    GFP_KERNEL);
			if (!ret_stack)
				return;
			per_cpu(idle_ret_stack, cpu) = ret_stack;
		}
		graph_init_task(t, ret_stack);
	}
}
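
/*
 * The idle task's return stack is cached in the per-CPU idle_ret_stack
 * pointer above, so repeated CPU offline/online cycles reuse the same
 * allocation; it is intentionally never freed.
 */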

/* Allocate a return stack for a newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	/* Make sure we do not use the parent ret_stack */
	t->ret_stack = NULL;
	t->curr_ret_stack = -1;

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		if (!ret_stack)
			return;
		graph_init_task(t, ret_stack);
	}
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack	*ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}
#endif