/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/sched/task.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/tracefs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>

#include <trace/events/sched.h>

#include <asm/setup.h>

#include "trace_output.h"
#include "trace_stat.h"

#define FTRACE_WARN_ON(cond)			\
	({					\
		int ___r = cond;		\
		if (WARN_ON(___r))		\
			ftrace_kill();		\
		___r;				\
	})

#define FTRACE_WARN_ON_ONCE(cond)		\
	({					\
		int ___r = cond;		\
		if (WARN_ON_ONCE(___r))		\
			ftrace_kill();		\
		___r;				\
	})

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_OPS_HASH(opsname)	\
	.func_hash		= &opsname.local_hash,			\
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#define ASSIGN_OPS_HASH(opsname, val) \
	.func_hash		= val, \
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define INIT_OPS_HASH(opsname)
#define ASSIGN_OPS_HASH(opsname, val)
#endif

static struct ftrace_ops ftrace_list_end __read_mostly = {
	.func		= ftrace_stub,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
	INIT_OPS_HASH(ftrace_list_end)
};

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* What to set function_trace_op to */
static struct ftrace_ops *set_function_trace_op;

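/*
 * Return true if this ops does PID filtering (FTRACE_OPS_FL_PID) and the
 * trace instance it belongs to currently has a function_pids list set.
 */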
static bool ftrace_pids_enabled(struct ftrace_ops *ops)
{
	struct trace_array *tr;

	if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
		return false;

	tr = ops->private;

	return tr->function_pids != NULL;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;

#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *regs);
#else
/* See comment below, where ftrace_ops_list_func is defined */
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
#endif

/*
 * Traverse the ftrace_global_list, invoking all entries.  The reason that we
 * can use rcu_dereference_raw_notrace() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism.  The rcu_dereference_raw_notrace() calls are needed to handle
 * concurrent insertions into the ftrace_global_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)			\
	op = rcu_dereference_raw_notrace(list);			\
	do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)				\
	while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&	\
	       unlikely((op) != &ftrace_list_end))

static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
		mutex_init(&ops->local_hash.regex_lock);
		ops->func_hash = &ops->local_hash;
		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
	}
#endif
}

/**
 * ftrace_nr_registered_ops - return number of ops registered
 *
 * Returns the number of ftrace_ops registered and tracing functions
 */
int ftrace_nr_registered_ops(void)
{
	struct ftrace_ops *ops;
	int cnt = 0;

	mutex_lock(&ftrace_lock);

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next)
		cnt++;

	mutex_unlock(&ftrace_lock);

	return cnt;
}

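/*
 * Wrapper installed while PID filtering is active: return early if this
 * CPU is currently ignoring the traced pid for the trace instance,
 * otherwise call the ops's saved function.
 */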
static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct pt_regs *regs)
{
	struct trace_array *tr = op->private;

	if (tr && this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid))
		return;

	op->saved_func(ip, parent_ip, op, regs);
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag before all CPUs stop calling the old function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}

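/* Set the per-CPU disabled flag of a PER_CPU ops on every possible CPU */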
static void per_cpu_ops_disable_all(struct ftrace_ops *ops)
{
	int cpu;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(ops->disabled, cpu) = 1;
}

static int per_cpu_ops_alloc(struct ftrace_ops *ops)
{
	int __percpu *disabled;

	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
		return -EINVAL;

	disabled = alloc_percpu(int);
	if (!disabled)
		return -ENOMEM;

	ops->disabled = disabled;
	per_cpu_ops_disable_all(ops);
	return 0;
}

static void ftrace_sync(struct work_struct *work)
{
	/*
	 * This function is just a stub to implement a hard force
	 * of synchronize_sched(). This requires synchronizing
	 * tasks even in userspace and idle.
	 *
	 * Yes, function tracing is rude.
	 */
}

static void ftrace_sync_ipi(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static void update_function_graph_func(void);

/* Both enabled by default (can be cleared by function_graph tracer flags) */
static bool fgraph_sleep_time = true;
static bool fgraph_graph_time = true;

#else
static inline void update_function_graph_func(void) { }
#endif


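/*
 * Pick what the arch trampoline should call for this ops: the ops's own
 * function when it can be called directly, or the list walker otherwise.
 */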
static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
{
	/*
	 * If this is a dynamic, RCU, or per CPU ops, or we force list func,
	 * then it needs to call the list anyway.
	 */
	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU |
			  FTRACE_OPS_FL_RCU) || FTRACE_FORCE_LIST_FUNC)
		return ftrace_ops_list_func;

	return ftrace_ops_get_func(ops);
}

static void update_ftrace_function(void)
{
	ftrace_func_t func;

	/*
	 * Prepare the ftrace_ops that the arch callback will use.
	 * If there's only one ftrace_ops registered, the ftrace_ops_list
	 * will point to the ops we want.
	 */
	set_function_trace_op = ftrace_ops_list;

	/* If there's no ftrace_ops registered, just call the stub function */
	if (ftrace_ops_list == &ftrace_list_end) {
		func = ftrace_stub;

	/*
	 * If we are at the end of the list and this ops is
	 * recursion safe and not dynamic and the arch supports passing ops,
	 * then have the mcount trampoline call the function directly.
	 */
	} else if (ftrace_ops_list->next == &ftrace_list_end) {
		func = ftrace_ops_get_list_func(ftrace_ops_list);

	} else {
		/* Just use the default ftrace_ops */
		set_function_trace_op = &ftrace_list_end;
		func = ftrace_ops_list_func;
	}

	update_function_graph_func();

	/* If there's no change, then do nothing more here */
	if (ftrace_trace_function == func)
		return;

	/*
	 * If we are using the list function, it doesn't care
	 * about the function_trace_ops.
	 */
	if (func == ftrace_ops_list_func) {
		ftrace_trace_function = func;
		/*
		 * Don't even bother setting function_trace_ops,
		 * it would be racy to do so anyway.
		 */
		return;
	}

#ifndef CONFIG_DYNAMIC_FTRACE
	/*
	 * For static tracing, we need to be a bit more careful.
	 * The function change takes effect immediately. Thus,
	 * we need to coordinate the setting of the function_trace_ops
	 * with the setting of the ftrace_trace_function.
	 *
	 * Set the function to the list ops, which will call the
	 * function we want, albeit indirectly, but it handles the
	 * ftrace_ops and doesn't depend on function_trace_op.
	 */
	ftrace_trace_function = ftrace_ops_list_func;
	/*
	 * Make sure all CPUs see this. Yes this is slow, but static
	 * tracing is slow and nasty to have enabled.
	 */
	schedule_on_each_cpu(ftrace_sync);
	/* Now all cpus are using the list ops. */
	function_trace_op = set_function_trace_op;
	/* Make sure the function_trace_op is visible on all CPUs */
	smp_wmb();
	/* Nasty way to force a rmb on all cpus */
	smp_call_function(ftrace_sync_ipi, NULL, 1);
	/* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */

	ftrace_trace_function = func;
}

int using_ftrace_ops_list_func(void)
{
	return ftrace_trace_function == ftrace_ops_list_func;
}

static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	ops->next = *list;
	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}

static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (*list == ops && ops->next == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

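/*
 * Validate @ops's flags, add it to ftrace_ops_list and update the
 * trampoline and traced function. Called with ftrace_lock held.
 */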
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ops->flags & FTRACE_OPS_FL_DELETED)
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
	 */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif

	if (!core_kernel_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	if (ops->flags & FTRACE_OPS_FL_PER_CPU) {
		if (per_cpu_ops_alloc(ops))
			return -ENOMEM;
	}

	add_ftrace_ops(&ftrace_ops_list, ops);

	/* Always save the function, and reset at unregistering */
	ops->saved_func = ops->func;

	if (ftrace_pids_enabled(ops))
		ops->func = ftrace_pid_func;

	ftrace_update_trampoline(ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	ret = remove_ftrace_ops(&ftrace_ops_list, ops);

	if (ret < 0)
		return ret;

	if (ftrace_enabled)
		update_ftrace_function();

	ops->func = ops->saved_func;

	return 0;
}

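/*
 * Called when the pid filter changes: switch every PID-aware ops between
 * ftrace_pid_func and its saved function and refresh its trampoline.
 */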
static void ftrace_update_pid_func(void)
{
	struct ftrace_ops *op;

	/* Only do something if we are tracing something */
	if (ftrace_trace_function == ftrace_stub)
		return;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op->flags & FTRACE_OPS_FL_PID) {
			op->func = ftrace_pids_enabled(op) ?
				ftrace_pid_func : op->saved_func;
			ftrace_update_trampoline(op);
		}
	} while_for_each_ftrace_op(op);

	update_ftrace_function();
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
	unsigned long long		time_squared;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))

static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)

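/* Step to the next used profile record, moving across pages as needed */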
static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}
#else
/* not function graph, compares against hits */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "  Function                               "
		 "Hit    Time            Avg             s^2\n"
		    "  --------                               "
		 "---    ----            ---             ---\n");
#else
	seq_puts(m, "  Function                               Hit\n"
		    "  --------                               ---\n");
#endif
	return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
#endif
	mutex_lock(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0)) {
		ret = -EBUSY;
		goto out;
	}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	avg = rec->time;
	do_div(avg, rec->counter);
	if (tracing_thresh && (avg < tracing_thresh))
		goto out;
#endif

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "    ");

	/* Sample standard deviation (s^2) */
	if (rec->counter <= 1)
		stddev = 0;
	else {
		/*
		 * Apply Welford's method:
		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
		 */
		stddev = rec->counter * rec->time_squared -
			 rec->time * rec->time;

		/*
		 * Divide only 1000 for ns^2 -> us^2 conversion.
		 * trace_print_graph_duration will divide 1000 again.
		 */
		do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
	}

	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(stddev, &s);
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');
out:
	mutex_unlock(&ftrace_profile_lock);

	return ret;
}

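/* Reset all records and the hash for one CPU, keeping the allocated pages */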
static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist because
	 * dynamic tracing is what counts them. With past experience
	 * we have around 20K functions. That should be more than enough.
	 * It is highly unlikely we will execute every function in
	 * the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

	for (i = 1; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

 out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}

static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	unsigned long key;

	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated, this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}

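/*
 * Profiler callback for every traced function: find (or allocate) the
 * record for @ip and increment its hit counter.
 */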
static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
		      struct ftrace_ops *ops, struct pt_regs *regs)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	int index = trace->depth;

	function_profile_call(trace->func, 0, NULL, NULL);

	if (index >= 0 && index < FTRACE_RETFUNC_DEPTH)
		current->ret_stack[index].subtime = 0;

	return 1;
}

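/*
 * On function exit, add the measured time to the record. When
 * fgraph_graph_time is off, time spent in children is subtracted so only
 * the function's own time is accumulated.
 */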
static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	/* If the calltime was zero'd ignore it */
	if (!trace->calltime)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!fgraph_graph_time) {
		int index;

		index = trace->depth;

		/* Append this call time to the parent time to subtract */
		if (index)
			current->ret_stack[index - 1].subtime += calltime;

		if (current->ret_stack[index].subtime < calltime)
			calltime -= current->ret_stack[index].subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		rec->time_squared += calltime * calltime;
	}

 out:
	local_irq_restore(flags);
}

static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&profile_graph_return,
				     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph();
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
	.func		= function_profile_call,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
	INIT_OPS_HASH(ftrace_profile_ops)
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_sched().
			 */
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	*ppos += cnt;

	return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
	.llseek		= default_llseek,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};

static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	struct dentry *entry;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		name = kasprintf(GFP_KERNEL, "function%d", cpu);
		if (!name) {
			/*
			 * The files created are permanent, if something happens
			 * we still do not free memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	entry = tracefs_create_file("function_profile_enabled", 0644,
				    d_tracer, NULL, &ftrace_profile_fops);
	if (!entry)
		pr_warn("Could not create tracefs 'function_profile_enabled' entry\n");
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

static struct pid * const ftrace_swapper_pid = &init_struct_pid;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int ftrace_graph_active;
#else
# define ftrace_graph_active 0
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

static struct ftrace_ops *removed_ops;

/*
 * Set when doing a global update, like enabling all recs or disabling them.
 * It is not set when just updating a single ftrace_ops.
 */
static bool update_all_ops;

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
	struct hlist_node	node;
	struct ftrace_probe_ops	*ops;
	unsigned long		flags;
	unsigned long		ip;
	void			*data;
	struct list_head	free_list;
};

struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
	.buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)

static struct ftrace_ops global_ops = {
	.func				= ftrace_stub,
	.local_hash.notrace_hash	= EMPTY_HASH,
	.local_hash.filter_hash		= EMPTY_HASH,
	INIT_OPS_HASH(global_ops)
	.flags				= FTRACE_OPS_FL_RECURSION_SAFE |
					  FTRACE_OPS_FL_INITIALIZED |
					  FTRACE_OPS_FL_PID,
};

/*
 * This is used by __kernel_text_address() to return true if the
 * address is on a dynamically allocated trampoline that would
 * not return true for either core_kernel_text() or
 * is_module_text_address().
 */
bool is_ftrace_trampoline(unsigned long addr)
{
	struct ftrace_ops *op;
	bool ret = false;

	/*
	 * Some of the ops may be dynamically allocated,
	 * they are freed after a synchronize_sched().
	 */
	preempt_disable_notrace();

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/*
		 * This is to check for dynamically allocated trampolines.
		 * Trampolines that are in kernel text will have
		 * core_kernel_text() return true.
		 */
		if (op->trampoline && op->trampoline_size)
			if (addr >= op->trampoline &&
			    addr < op->trampoline + op->trampoline_size) {
				ret = true;
				goto out;
			}
	} while_for_each_ftrace_op(op);

 out:
	preempt_enable_notrace();

	return ret;
}

struct ftrace_page {
	struct ftrace_page	*next;
	struct dyn_ftrace	*records;
	int			index;
	int			size;
};

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static __always_inline unsigned long
ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip)
{
	if (hash->size_bits > 0)
		return hash_long(ip, hash->size_bits);

	return 0;
}

/* Only use this function if ftrace_hash_empty() has already been tested */
static __always_inline struct ftrace_func_entry *
__ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	unsigned long key;
	struct ftrace_func_entry *entry;
	struct hlist_head *hhd;

	key = ftrace_hash_key(hash, ip);
	hhd = &hash->buckets[key];

	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
		if (entry->ip == ip)
			return entry;
	}
	return NULL;
}

/**
 * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
 * @hash: The hash to look at
 * @ip: The instruction pointer to test
 *
 * Search a given @hash to see if a given instruction pointer (@ip)
 * exists in it.
 *
 * Returns the entry that holds the @ip if found. NULL otherwise.
 */
struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	if (ftrace_hash_empty(hash))
		return NULL;

	return __ftrace_lookup_ip(hash, ip);
}

static void __add_hash_entry(struct ftrace_hash *hash,
			     struct ftrace_func_entry *entry)
{
	struct hlist_head *hhd;
	unsigned long key;

	key = ftrace_hash_key(hash, entry->ip);
	hhd = &hash->buckets[key];
	hlist_add_head(&entry->hlist, hhd);
	hash->count++;
}

static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
	struct ftrace_func_entry *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->ip = ip;
	__add_hash_entry(hash, entry);

	return 0;
}

static void
free_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	kfree(entry);
	hash->count--;
}

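/* Unlink an entry without freeing it (used when moving it to another hash) */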
static void
remove_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	hash->count--;
}

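/* Free every entry in the hash; the hash structure itself is kept */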
static void ftrace_hash_clear(struct ftrace_hash *hash)
{
	struct hlist_head *hhd;
	struct hlist_node *tn;
	struct ftrace_func_entry *entry;
	int size = 1 << hash->size_bits;
	int i;

	if (!hash->count)
		return;

	for (i = 0; i < size; i++) {
		hhd = &hash->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
			free_hash_entry(hash, entry);
	}
	FTRACE_WARN_ON(hash->count);
}

static void free_ftrace_hash(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	ftrace_hash_clear(hash);
	kfree(hash->buckets);
	kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
	struct ftrace_hash *hash;

	hash = container_of(rcu, struct ftrace_hash, rcu);
	free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
}

void ftrace_free_filter(struct ftrace_ops *ops)
{
	ftrace_ops_init(ops);
	free_ftrace_hash(ops->func_hash->filter_hash);
	free_ftrace_hash(ops->func_hash->notrace_hash);
}

static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
	struct ftrace_hash *hash;
	int size;

	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
	if (!hash)
		return NULL;

	size = 1 << size_bits;
	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);

	if (!hash->buckets) {
		kfree(hash);
		return NULL;
	}

	hash->size_bits = size_bits;

	return hash;
}

static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	int size;
	int ret;
	int i;

	new_hash = alloc_ftrace_hash(size_bits);
	if (!new_hash)
		return NULL;

	/* Empty hash? */
	if (ftrace_hash_empty(hash))
		return new_hash;

	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
			ret = add_hash_entry(new_hash, entry->ip);
			if (ret < 0)
				goto free_hash;
		}
	}

	FTRACE_WARN_ON(new_hash->count != hash->count);

	return new_hash;

 free_hash:
	free_ftrace_hash(new_hash);
	return NULL;
}

static void
ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
static void
ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash);

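/*
 * Move all entries of @src into a newly sized hash, emptying @src.
 * Returns the new hash, EMPTY_HASH if @src was empty, or NULL on failure.
 */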
static struct ftrace_hash *
__ftrace_hash_move(struct ftrace_hash *src)
{
	struct ftrace_func_entry *entry;
	struct hlist_node *tn;
	struct hlist_head *hhd;
	struct ftrace_hash *new_hash;
	int size = src->count;
	int bits = 0;
	int i;

	/*
	 * If the new source is empty, just return the empty_hash.
	 */
	if (!src->count)
		return EMPTY_HASH;

	/*
	 * Make the hash size about 1/2 the # found
	 */
	for (size /= 2; size; size >>= 1)
		bits++;

	/* Don't allocate too much */
	if (bits > FTRACE_HASH_MAX_BITS)
		bits = FTRACE_HASH_MAX_BITS;

	new_hash = alloc_ftrace_hash(bits);
	if (!new_hash)
		return NULL;

	size = 1 << src->size_bits;
	for (i = 0; i < size; i++) {
		hhd = &src->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
			remove_hash_entry(src, entry);
			__add_hash_entry(new_hash, entry);
		}
	}

	return new_hash;
}

static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
		 struct ftrace_hash **dst, struct ftrace_hash *src)
{
	struct ftrace_hash *new_hash;
	int ret;

	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
		return -EINVAL;

	new_hash = __ftrace_hash_move(src);
	if (!new_hash)
		return -ENOMEM;

	/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
	if (enable) {
		/* IPMODIFY should be updated only when filter_hash updating */
		ret = ftrace_hash_ipmodify_update(ops, new_hash);
		if (ret < 0) {
			free_ftrace_hash(new_hash);
			return ret;
		}
	}

	/*
	 * Remove the current set, update the hash and add
	 * them back.
	 */
	ftrace_hash_rec_disable_modify(ops, enable);

	rcu_assign_pointer(*dst, new_hash);

	ftrace_hash_rec_enable_modify(ops, enable);

	return 0;
}

static bool hash_contains_ip(unsigned long ip,
			     struct ftrace_ops_hash *hash)
{
	/*
	 * The function record is a match if it exists in the filter
	 * hash and not in the notrace hash. Note, an empty hash is
	 * considered a match for the filter hash, but an empty
	 * notrace hash is considered not in the notrace hash.
	 */
	return (ftrace_hash_empty(hash->filter_hash) ||
		__ftrace_lookup_ip(hash->filter_hash, ip)) &&
		(ftrace_hash_empty(hash->notrace_hash) ||
		 !__ftrace_lookup_ip(hash->notrace_hash, ip));
}

/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu_sched().
 */
static int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
	struct ftrace_ops_hash hash;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * There's a small race when adding ops that the ftrace handler
	 * that wants regs, may be called without them. We can not
	 * allow that handler to be called if regs is NULL.
	 */
	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
		return 0;
#endif

	hash.filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
	hash.notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);

	if (hash_contains_ip(ip, &hash))
		ret = 1;
	else
		ret = 0;

	return ret;
}

/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}


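/*
 * Comparator used to bsearch the dyn_ftrace records. The search key
 * overloads ->ip as the start and ->flags as the end of the range.
 */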
static int ftrace_cmp_recs(const void *a, const void *b)
{
	const struct dyn_ftrace *key = a;
	const struct dyn_ftrace *rec = b;

	if (key->flags < rec->ip)
		return -1;
	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
		return 1;
	return 0;
}

/**
 * ftrace_location_range - return the first address of a traced location
 *	if it touches the given ip range
 * @start: start of range to search.
 * @end: end of range to search (inclusive). @end points to the last byte
 *	to check.
 *
 * Returns rec->ip if the related ftrace location is at least partly within
 * the given address range. That is, the first address of the instruction
 * that is either a NOP or call to the function tracer. It checks the ftrace
 * internal tables to determine if the address belongs or not.
 */
unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	struct dyn_ftrace key;

	key.ip = start;
	key.flags = end;	/* overload flags, as it is unsigned long */

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		if (end < pg->records[0].ip ||
		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
			continue;
		rec = bsearch(&key, pg->records, pg->index,
			      sizeof(struct dyn_ftrace),
			      ftrace_cmp_recs);
		if (rec)
			return rec->ip;
	}

	return 0;
}

/**
 * ftrace_location - return true if the ip given is a traced location
 * @ip: the instruction pointer to check
 *
 * Returns rec->ip if @ip given is a pointer to a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
unsigned long ftrace_location(unsigned long ip)
{
	return ftrace_location_range(ip, ip);
}

/**
 * ftrace_text_reserved - return true if range contains an ftrace location
 * @start: start of range to search
 * @end: end of range to search (inclusive). @end points to the last byte to check.
 *
 * Returns 1 if @start and @end contains a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_text_reserved(const void *start, const void *end)
{
	unsigned long ret;

	ret = ftrace_location_range((unsigned long)start,
				    (unsigned long)end);

	return (int)!!ret;
}

/* Test if ops registered to this rec needs regs */
static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	bool keep_regs = false;

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next) {
		/* pass rec in as regs to have non-NULL val */
		if (ftrace_ops_test(ops, rec->ip, rec)) {
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				keep_regs = true;
				break;
			}
		}
	}

	return  keep_regs;
}

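/*
 * Walk every dyn_ftrace record and adjust its ref count and REGS/TRAMP
 * flags for @ops's hash being added (@inc) or removed. Returns true if
 * any call site now needs to be modified.
 */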
static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
				     int filter_hash,
				     bool inc)
{
	struct ftrace_hash *hash;
	struct ftrace_hash *other_hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	bool update = false;
	int count = 0;
	int all = 0;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return false;

	/*
	 * In the filter_hash case:
	 *   If the count is zero, we update all records.
	 *   Otherwise we just update the items in the hash.
	 *
	 * In the notrace_hash case:
	 *   We enable the update in the hash.
	 *   As disabling notrace means enabling the tracing,
	 *   and enabling notrace means disabling, the inc variable
	 *   gets inverted.
	 */
	if (filter_hash) {
		hash = ops->func_hash->filter_hash;
		other_hash = ops->func_hash->notrace_hash;
		if (ftrace_hash_empty(hash))
			all = 1;
	} else {
		inc = !inc;
		hash = ops->func_hash->notrace_hash;
		other_hash = ops->func_hash->filter_hash;
		/*
		 * If the notrace hash has no items,
		 * then there's nothing to do.
		 */
		if (ftrace_hash_empty(hash))
			return false;
	}

	do_for_each_ftrace_rec(pg, rec) {
		int in_other_hash = 0;
		int in_hash = 0;
		int match = 0;

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		if (all) {
			/*
			 * Only the filter_hash affects all records.
			 * Update if the record is not in the notrace hash.
			 */
			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
				match = 1;
		} else {
			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);

			/*
			 * If filter_hash is set, we want to match all functions
			 * that are in the hash but not in the other hash.
			 *
			 * If filter_hash is not set, then we are decrementing.
			 * That means we match anything that is in the hash
			 * and also in the other_hash. That is, we need to turn
			 * off functions in the other hash because they are disabled
			 * by this hash.
			 */
			if (filter_hash && in_hash && !in_other_hash)
				match = 1;
			else if (!filter_hash && in_hash &&
				 (in_other_hash || ftrace_hash_empty(other_hash)))
				match = 1;
		}
		if (!match)
			continue;

		if (inc) {
			rec->flags++;
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
				return false;

			/*
			 * If there's only a single callback registered to a
			 * function, and the ops has a trampoline registered
			 * for it, then we can call it directly.
			 */
			if (ftrace_rec_count(rec) == 1 && ops->trampoline)
				rec->flags |= FTRACE_FL_TRAMP;
			else
				/*
				 * If we are adding another function callback
				 * to this function, and the previous had a
				 * custom trampoline in use, then we need to go
				 * back to the default trampoline.
				 */
				rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * If any ops wants regs saved for this function
			 * then all ops will get saved regs.
			 */
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
				rec->flags |= FTRACE_FL_REGS;
		} else {
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
				return false;
			rec->flags--;

			/*
			 * If the rec had REGS enabled and the ops that is
			 * being removed had REGS set, then see if there is
			 * still any ops for this record that wants regs.
			 * If not, we can stop recording them.
			 */
			if (ftrace_rec_count(rec) > 0 &&
			    rec->flags & FTRACE_FL_REGS &&
			    ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				if (!test_rec_ops_needs_regs(rec))
					rec->flags &= ~FTRACE_FL_REGS;
			}

			/*
			 * If the rec had TRAMP enabled, then it needs to
			 * be cleared. As TRAMP can only be enabled if
			 * there is only a single ops attached to it.
			 * In other words, always disable it on decrementing.
			 * In the future, we may set it if rec count is
			 * decremented to one, and the ops that is left
			 * has a trampoline.
			 */
			rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * flags will be cleared in ftrace_check_record()
			 * if rec count is zero.
			 */
		}
		count++;

		/* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
		update |= ftrace_test_record(rec, 1) != FTRACE_UPDATE_IGNORE;

		/* Shortcut, if we handled all records, we are done. */
		if (!all && count == hash->count)
			return update;
	} while_for_each_ftrace_rec();

	return update;
}

static bool ftrace_hash_rec_disable(struct ftrace_ops *ops,
				    int filter_hash)
{
	return __ftrace_hash_rec_update(ops, filter_hash, 0);
}

static bool ftrace_hash_rec_enable(struct ftrace_ops *ops,
				   int filter_hash)
{
	return __ftrace_hash_rec_update(ops, filter_hash, 1);
}

static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
					  int filter_hash, int inc)
{
	struct ftrace_ops *op;

	__ftrace_hash_rec_update(ops, filter_hash, inc);

	if (ops->func_hash != &global_ops.local_hash)
		return;

	/*
	 * If the ops shares the global_ops hash, then we need to update
	 * all ops that are enabled and use this hash.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* Already done */
		if (op == ops)
			continue;
		if (op->func_hash == &global_ops.local_hash)
			__ftrace_hash_rec_update(op, filter_hash, inc);
	} while_for_each_ftrace_op(op);
}

static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
					   int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
					  int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 1);
}

/*
 * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
 * or no-needed to update, -EBUSY if it detects a conflict of the flag
 * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
 * Note that old_hash and new_hash has below meanings
 *  - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
 *  - If the hash is EMPTY_HASH, it hits nothing
 *  - Anything else hits the recs which match the hash entries.
 */
static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
					 struct ftrace_hash *old_hash,
					 struct ftrace_hash *new_hash)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec, *end = NULL;
	int in_old, in_new;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return 0;

	if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
		return 0;

	/*
	 * Since the IPMODIFY is a very address sensitive action, we do not
	 * allow ftrace_ops to set all functions to new hash.
	 */
	if (!new_hash || !old_hash)
		return -EINVAL;

	/* Update rec->flags */
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		/* We need to update only differences of filter_hash */
		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
		if (in_old == in_new)
			continue;

		if (in_new) {
			/* New entries must ensure no others are using it */
			if (rec->flags & FTRACE_FL_IPMODIFY)
				goto rollback;
			rec->flags |= FTRACE_FL_IPMODIFY;
		} else /* Removed entry */
			rec->flags &= ~FTRACE_FL_IPMODIFY;
	} while_for_each_ftrace_rec();

	return 0;

rollback:
	end = rec;

	/* Roll back what we did above */
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		if (rec == end)
			goto err_out;

		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
		if (in_old == in_new)
			continue;

		if (in_new)
			rec->flags &= ~FTRACE_FL_IPMODIFY;
		else
			rec->flags |= FTRACE_FL_IPMODIFY;
	} while_for_each_ftrace_rec();

err_out:
	return -EBUSY;
}

static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
{
	struct ftrace_hash *hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(hash))
		hash = NULL;

	return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
}

/* Disabling always succeeds */
static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
{
	struct ftrace_hash *hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(hash))
		hash = NULL;

	__ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
}

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash)
{
	struct ftrace_hash *old_hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(old_hash))
		old_hash = NULL;

	if (ftrace_hash_empty(new_hash))
		new_hash = NULL;

	return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
}

1963
static void print_ip_ins(const char *fmt, const unsigned char *p)
1964 1965 1966 1967 1968 1969 1970 1971 1972
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

1973 1974
static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
1975 1976
static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
1977

1978
enum ftrace_bug_type ftrace_bug_type;
1979
const void *ftrace_expected;
1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000

static void print_bug_type(void)
{
	switch (ftrace_bug_type) {
	case FTRACE_BUG_UNKNOWN:
		break;
	case FTRACE_BUG_INIT:
		pr_info("Initializing ftrace call sites\n");
		break;
	case FTRACE_BUG_NOP:
		pr_info("Setting ftrace call site to NOP\n");
		break;
	case FTRACE_BUG_CALL:
		pr_info("Setting ftrace call site to call ftrace function\n");
		break;
	case FTRACE_BUG_UPDATE:
		pr_info("Updating ftrace call site to call a different ftrace function\n");
		break;
	}
}

2001 2002 2003
/**
 * ftrace_bug - report and shutdown function tracer
 * @failed: The failed type (EFAULT, EINVAL, EPERM)
2004
 * @rec: The record that failed
2005 2006 2007 2008 2009 2010 2011 2012
 *
 * The arch code that enables or disables the function tracing
 * can call ftrace_bug() when it has detected a problem in
 * modifying the code. @failed should be one of either:
 * EFAULT - if the problem happens on reading the @ip address
 * EINVAL - if what is read at @ip is not what was expected
 * EPERM - if the problem happens on writting to the @ip address
 */
2013
void ftrace_bug(int failed, struct dyn_ftrace *rec)
2014
{
2015 2016
	unsigned long ip = rec ? rec->ip : 0;

2017 2018 2019 2020 2021 2022 2023 2024 2025 2026
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
2027
		print_ip_ins(" actual:   ", (unsigned char *)ip);
2028
		pr_cont("\n");
2029 2030 2031 2032
		if (ftrace_expected) {
			print_ip_ins(" expected: ", ftrace_expected);
			pr_cont("\n");
		}
2033 2034 2035 2036 2037 2038 2039 2040 2041 2042 2043
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
2044
	print_bug_type();
2045 2046 2047 2048 2049 2050 2051 2052
	if (rec) {
		struct ftrace_ops *ops = NULL;

		pr_info("ftrace record flags: %lx\n", rec->flags);
		pr_cont(" (%ld)%s", ftrace_rec_count(rec),
			rec->flags & FTRACE_FL_REGS ? " R" : "  ");
		if (rec->flags & FTRACE_FL_TRAMP_EN) {
			ops = ftrace_find_tramp_ops_any(rec);
2053 2054 2055 2056 2057 2058 2059 2060
			if (ops) {
				do {
					pr_cont("\ttramp: %pS (%pS)",
						(void *)ops->trampoline,
						(void *)ops->func);
					ops = ftrace_find_tramp_ops_next(rec, ops);
				} while (ops);
			} else
2061 2062 2063 2064
				pr_cont("\ttramp: ERROR!");

		}
		ip = ftrace_get_addr_curr(rec);
2065
		pr_cont("\n expected tramp: %lx\n", ip);
2066
	}
2067 2068
}

2069
static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
2070
{
2071
	unsigned long flag = 0UL;
2072

2073 2074
	ftrace_bug_type = FTRACE_BUG_UNKNOWN;

2075 2076 2077
	if (rec->flags & FTRACE_FL_DISABLED)
		return FTRACE_UPDATE_IGNORE;

S
Steven Rostedt 已提交
2078
	/*
2079
	 * If we are updating calls:
S
Steven Rostedt 已提交
2080
	 *
2081 2082
	 *   If the record has a ref count, then we need to enable it
	 *   because someone is using it.
S
Steven Rostedt 已提交
2083
	 *
2084 2085
	 *   Otherwise we make sure its disabled.
	 *
2086
	 * If we are disabling calls, then disable all records that
2087
	 * are enabled.
S
Steven Rostedt 已提交
2088
	 */
2089
	if (enable && ftrace_rec_count(rec))
2090
		flag = FTRACE_FL_ENABLED;
S
Steven Rostedt 已提交
2091

2092
	/*
2093 2094 2095
	 * If enabling and the REGS flag does not match the REGS_EN, or
	 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
	 * this record. Set flags to fail the compare against ENABLED.
2096
	 */
2097 2098 2099 2100 2101 2102 2103 2104 2105
	if (flag) {
		if (!(rec->flags & FTRACE_FL_REGS) != 
		    !(rec->flags & FTRACE_FL_REGS_EN))
			flag |= FTRACE_FL_REGS;

		if (!(rec->flags & FTRACE_FL_TRAMP) != 
		    !(rec->flags & FTRACE_FL_TRAMP_EN))
			flag |= FTRACE_FL_TRAMP;
	}
2106

2107 2108
	/* If the state of this record hasn't changed, then do nothing */
	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
2109
		return FTRACE_UPDATE_IGNORE;
S
Steven Rostedt 已提交
2110

2111
	if (flag) {
2112 2113 2114 2115
		/* Save off if rec is being enabled (for return value) */
		flag ^= rec->flags & FTRACE_FL_ENABLED;

		if (update) {
2116
			rec->flags |= FTRACE_FL_ENABLED;
2117 2118 2119 2120 2121 2122
			if (flag & FTRACE_FL_REGS) {
				if (rec->flags & FTRACE_FL_REGS)
					rec->flags |= FTRACE_FL_REGS_EN;
				else
					rec->flags &= ~FTRACE_FL_REGS_EN;
			}
2123 2124 2125 2126 2127 2128
			if (flag & FTRACE_FL_TRAMP) {
				if (rec->flags & FTRACE_FL_TRAMP)
					rec->flags |= FTRACE_FL_TRAMP_EN;
				else
					rec->flags &= ~FTRACE_FL_TRAMP_EN;
			}
2129 2130 2131 2132 2133 2134 2135
		}

		/*
		 * If this record is being updated from a nop, then
		 *   return UPDATE_MAKE_CALL.
		 * Otherwise,
		 *   return UPDATE_MODIFY_CALL to tell the caller to convert
2136
		 *   from the save regs, to a non-save regs function or
2137
		 *   vice versa, or from a trampoline call.
2138
		 */
2139 2140
		if (flag & FTRACE_FL_ENABLED) {
			ftrace_bug_type = FTRACE_BUG_CALL;
2141
			return FTRACE_UPDATE_MAKE_CALL;
2142
		}
2143

2144
		ftrace_bug_type = FTRACE_BUG_UPDATE;
2145
		return FTRACE_UPDATE_MODIFY_CALL;
2146 2147
	}

2148 2149
	if (update) {
		/* If there's no more users, clear all flags */
2150
		if (!ftrace_rec_count(rec))
2151 2152
			rec->flags = 0;
		else
2153 2154 2155 2156 2157 2158
			/*
			 * Just disable the record, but keep the ops TRAMP
			 * and REGS states. The _EN flags must be disabled though.
			 */
			rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
					FTRACE_FL_REGS_EN);
2159
	}
2160

2161
	ftrace_bug_type = FTRACE_BUG_NOP;
2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182 2183 2184 2185 2186 2187 2188 2189 2190 2191
	return FTRACE_UPDATE_MAKE_NOP;
}

/**
 * ftrace_update_record, set a record that now is tracing or not
 * @rec: the record to update
 * @enable: set to 1 if the record is tracing, zero to force disable
 *
 * The records that represent all functions that can be traced need
 * to be updated when tracing has been enabled.
 */
int ftrace_update_record(struct dyn_ftrace *rec, int enable)
{
	return ftrace_check_record(rec, enable, 1);
}

/**
 * ftrace_test_record, check if the record has been enabled or not
 * @rec: the record to test
 * @enable: set to 1 to check if enabled, 0 if it is disabled
 *
 * The arch code may need to test if a record is already set to
 * tracing to determine how to modify the function code that it
 * represents.
 */
int ftrace_test_record(struct dyn_ftrace *rec, int enable)
{
	return ftrace_check_record(rec, enable, 0);
}

2192 2193 2194 2195
static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
2196
	unsigned long ip = rec->ip;
2197 2198 2199 2200 2201 2202

	do_for_each_ftrace_op(op, ftrace_ops_list) {

		if (!op->trampoline)
			continue;

2203
		if (hash_contains_ip(ip, op->func_hash))
2204 2205 2206 2207 2208 2209
			return op;
	} while_for_each_ftrace_op(op);

	return NULL;
}

2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227
static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
			   struct ftrace_ops *op)
{
	unsigned long ip = rec->ip;

	while_for_each_ftrace_op(op) {

		if (!op->trampoline)
			continue;

		if (hash_contains_ip(ip, op->func_hash))
			return op;
	} 

	return NULL;
}

2228 2229 2230 2231
static struct ftrace_ops *
ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
2232
	unsigned long ip = rec->ip;
2233

2234 2235 2236 2237 2238 2239 2240 2241
	/*
	 * Need to check removed ops first.
	 * If they are being removed, and this rec has a tramp,
	 * and this rec is in the ops list, then it would be the
	 * one with the tramp.
	 */
	if (removed_ops) {
		if (hash_contains_ip(ip, &removed_ops->old_hash))
2242 2243 2244
			return removed_ops;
	}

2245 2246 2247 2248 2249 2250 2251
	/*
	 * Need to find the current trampoline for a rec.
	 * Now, a trampoline is only attached to a rec if there
	 * was a single 'ops' attached to it. But this can be called
	 * when we are adding another op to the rec or removing the
	 * current one. Thus, if the op is being added, we can
	 * ignore it because it hasn't attached itself to the rec
2252 2253 2254 2255 2256 2257 2258 2259 2260 2261
	 * yet.
	 *
	 * If an ops is being modified (hooking to different functions)
	 * then we don't care about the new functions that are being
	 * added, just the old ones (that are probably being removed).
	 *
	 * If we are adding an ops to a function that already is using
	 * a trampoline, it needs to be removed (trampolines are only
	 * for single ops connected), then an ops that is not being
	 * modified also needs to be checked.
2262
	 */
2263
	do_for_each_ftrace_op(op, ftrace_ops_list) {
2264 2265 2266 2267 2268 2269 2270 2271 2272

		if (!op->trampoline)
			continue;

		/*
		 * If the ops is being added, it hasn't gotten to
		 * the point to be removed from this tree yet.
		 */
		if (op->flags & FTRACE_OPS_FL_ADDING)
2273 2274
			continue;

2275

2276
		/*
2277 2278 2279
		 * If the ops is being modified and is in the old
		 * hash, then it is probably being removed from this
		 * function.
2280 2281 2282
		 */
		if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
		    hash_contains_ip(ip, &op->old_hash))
2283
			return op;
2284 2285 2286 2287 2288 2289 2290 2291
		/*
		 * If the ops is not being added or modified, and it's
		 * in its normal filter hash, then this must be the one
		 * we want!
		 */
		if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
		    hash_contains_ip(ip, op->func_hash))
			return op;
2292 2293 2294 2295 2296 2297 2298 2299 2300 2301

	} while_for_each_ftrace_op(op);

	return NULL;
}

static struct ftrace_ops *
ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
2302
	unsigned long ip = rec->ip;
2303 2304 2305

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* pass rec in as regs to have non-NULL val */
2306
		if (hash_contains_ip(ip, op->func_hash))
2307 2308 2309 2310 2311 2312
			return op;
	} while_for_each_ftrace_op(op);

	return NULL;
}

2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324
/**
 * ftrace_get_addr_new - Get the call address to set to
 * @rec:  The ftrace record descriptor
 *
 * If the record has the FTRACE_FL_REGS set, that means that it
 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
 * is not not set, then it wants to convert to the normal callback.
 *
 * Returns the address of the trampoline to set to
 */
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
{
2325 2326 2327 2328 2329 2330
	struct ftrace_ops *ops;

	/* Trampolines take precedence over regs */
	if (rec->flags & FTRACE_FL_TRAMP) {
		ops = ftrace_find_tramp_ops_new(rec);
		if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
2331 2332
			pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
				(void *)rec->ip, (void *)rec->ip, rec->flags);
2333 2334 2335 2336 2337 2338
			/* Ftrace is shutting down, return anything */
			return (unsigned long)FTRACE_ADDR;
		}
		return ops->trampoline;
	}

2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356
	if (rec->flags & FTRACE_FL_REGS)
		return (unsigned long)FTRACE_REGS_ADDR;
	else
		return (unsigned long)FTRACE_ADDR;
}

/**
 * ftrace_get_addr_curr - Get the call address that is already there
 * @rec:  The ftrace record descriptor
 *
 * The FTRACE_FL_REGS_EN is set when the record already points to
 * a function that saves all the regs. Basically the '_EN' version
 * represents the current state of the function.
 *
 * Returns the address of the trampoline that is currently being called
 */
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
{
2357 2358 2359 2360 2361 2362
	struct ftrace_ops *ops;

	/* Trampolines take precedence over regs */
	if (rec->flags & FTRACE_FL_TRAMP_EN) {
		ops = ftrace_find_tramp_ops_curr(rec);
		if (FTRACE_WARN_ON(!ops)) {
2363 2364
			pr_warn("Bad trampoline accounting at: %p (%pS)\n",
				(void *)rec->ip, (void *)rec->ip);
2365 2366 2367 2368 2369 2370
			/* Ftrace is shutting down, return anything */
			return (unsigned long)FTRACE_ADDR;
		}
		return ops->trampoline;
	}

2371 2372 2373 2374 2375 2376
	if (rec->flags & FTRACE_FL_REGS_EN)
		return (unsigned long)FTRACE_REGS_ADDR;
	else
		return (unsigned long)FTRACE_ADDR;
}

2377 2378 2379
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
2380
	unsigned long ftrace_old_addr;
2381 2382 2383
	unsigned long ftrace_addr;
	int ret;

2384
	ftrace_addr = ftrace_get_addr_new(rec);
2385

2386 2387 2388 2389
	/* This needs to be done before we call ftrace_update_record */
	ftrace_old_addr = ftrace_get_addr_curr(rec);

	ret = ftrace_update_record(rec, enable);
2390

2391 2392
	ftrace_bug_type = FTRACE_BUG_UNKNOWN;

2393 2394 2395 2396 2397
	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MAKE_CALL:
2398
		ftrace_bug_type = FTRACE_BUG_CALL;
2399
		return ftrace_make_call(rec, ftrace_addr);
2400 2401

	case FTRACE_UPDATE_MAKE_NOP:
2402
		ftrace_bug_type = FTRACE_BUG_NOP;
2403
		return ftrace_make_nop(NULL, rec, ftrace_old_addr);
2404 2405

	case FTRACE_UPDATE_MODIFY_CALL:
2406
		ftrace_bug_type = FTRACE_BUG_UPDATE;
2407
		return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
2408 2409
	}

2410
	return -1; /* unknow ftrace bug */
2411 2412
}

2413
void __weak ftrace_replace_code(int enable)
2414 2415 2416
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
S
Steven Rostedt 已提交
2417
	int failed;
2418

2419 2420 2421
	if (unlikely(ftrace_disabled))
		return;

2422
	do_for_each_ftrace_rec(pg, rec) {
2423 2424 2425 2426

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

2427
		failed = __ftrace_replace_code(rec, enable);
2428
		if (failed) {
2429
			ftrace_bug(failed, rec);
2430 2431
			/* Stop processing */
			return;
2432
		}
2433
	} while_for_each_ftrace_rec();
2434 2435
}

2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507
struct ftrace_rec_iter {
	struct ftrace_page	*pg;
	int			index;
};

/**
 * ftrace_rec_iter_start, start up iterating over traced functions
 *
 * Returns an iterator handle that is used to iterate over all
 * the records that represent address locations where functions
 * are traced.
 *
 * May return NULL if no records are available.
 */
struct ftrace_rec_iter *ftrace_rec_iter_start(void)
{
	/*
	 * We only use a single iterator.
	 * Protected by the ftrace_lock mutex.
	 */
	static struct ftrace_rec_iter ftrace_rec_iter;
	struct ftrace_rec_iter *iter = &ftrace_rec_iter;

	iter->pg = ftrace_pages_start;
	iter->index = 0;

	/* Could have empty pages */
	while (iter->pg && !iter->pg->index)
		iter->pg = iter->pg->next;

	if (!iter->pg)
		return NULL;

	return iter;
}

/**
 * ftrace_rec_iter_next, get the next record to process.
 * @iter: The handle to the iterator.
 *
 * Returns the next iterator after the given iterator @iter.
 */
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
{
	iter->index++;

	if (iter->index >= iter->pg->index) {
		iter->pg = iter->pg->next;
		iter->index = 0;

		/* Could have empty pages */
		while (iter->pg && !iter->pg->index)
			iter->pg = iter->pg->next;
	}

	if (!iter->pg)
		return NULL;

	return iter;
}

/**
 * ftrace_rec_iter_record, get the record at the iterator location
 * @iter: The current iterator location
 *
 * Returns the record that the current @iter is at.
 */
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
{
	return &iter->pg->records[iter->index];
}

2508
static int
2509
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
2510
{
2511
	int ret;
2512

2513 2514 2515
	if (unlikely(ftrace_disabled))
		return 0;

2516
	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
2517
	if (ret) {
2518
		ftrace_bug_type = FTRACE_BUG_INIT;
2519
		ftrace_bug(ret, rec);
2520
		return 0;
2521
	}
2522
	return 1;
2523 2524
}

2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542
/*
 * archs can override this function if they must do something
 * before the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
	return 0;
}

/*
 * archs can override this function if they must do something
 * after the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
	return 0;
}

2543
void ftrace_modify_all_code(int command)
2544
{
2545
	int update = command & FTRACE_UPDATE_TRACE_FUNC;
2546
	int err = 0;
2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557

	/*
	 * If the ftrace_caller calls a ftrace_ops func directly,
	 * we need to make sure that it only traces functions it
	 * expects to trace. When doing the switch of functions,
	 * we need to update to the ftrace_ops_list_func first
	 * before the transition between old and new calls are set,
	 * as the ftrace_ops_list_func will check the ops hashes
	 * to make sure the ops are having the right functions
	 * traced.
	 */
2558 2559 2560 2561 2562
	if (update) {
		err = ftrace_update_ftrace_func(ftrace_ops_list_func);
		if (FTRACE_WARN_ON(err))
			return;
	}
2563

2564
	if (command & FTRACE_UPDATE_CALLS)
2565
		ftrace_replace_code(1);
2566
	else if (command & FTRACE_DISABLE_CALLS)
2567 2568
		ftrace_replace_code(0);

2569 2570 2571 2572 2573 2574
	if (update && ftrace_trace_function != ftrace_ops_list_func) {
		function_trace_op = set_function_trace_op;
		smp_wmb();
		/* If irqs are disabled, we are in stop machine */
		if (!irqs_disabled())
			smp_call_function(ftrace_sync_ipi, NULL, 1);
2575 2576 2577
		err = ftrace_update_ftrace_func(ftrace_trace_function);
		if (FTRACE_WARN_ON(err))
			return;
2578
	}
2579

2580
	if (command & FTRACE_START_FUNC_RET)
2581
		err = ftrace_enable_ftrace_graph_caller();
2582
	else if (command & FTRACE_STOP_FUNC_RET)
2583 2584
		err = ftrace_disable_ftrace_graph_caller();
	FTRACE_WARN_ON(err);
2585 2586 2587 2588 2589 2590 2591
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	ftrace_modify_all_code(*command);
2592

2593
	return 0;
2594 2595
}

2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619
/**
 * ftrace_run_stop_machine, go back to the stop machine method
 * @command: The command to tell ftrace what to do
 *
 * If an arch needs to fall back to the stop machine method, the
 * it can call this function.
 */
void ftrace_run_stop_machine(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

/**
 * arch_ftrace_update_code, modify the code to trace or not trace
 * @command: The command that needs to be done
 *
 * Archs can override this function if it does not need to
 * run stop_machine() to modify code.
 */
void __weak arch_ftrace_update_code(int command)
{
	ftrace_run_stop_machine(command);
}

I
Ingo Molnar 已提交
2620
static void ftrace_run_update_code(int command)
2621
{
2622 2623 2624 2625 2626 2627 2628
	int ret;

	ret = ftrace_arch_code_modify_prepare();
	FTRACE_WARN_ON(ret);
	if (ret)
		return;

2629 2630 2631 2632 2633 2634 2635 2636
	/*
	 * By default we use stop_machine() to modify the code.
	 * But archs can do what ever they want as long as it
	 * is safe. The stop_machine() is the safest, but also
	 * produces the most overhead.
	 */
	arch_ftrace_update_code(command);

2637 2638
	ret = ftrace_arch_code_modify_post_process();
	FTRACE_WARN_ON(ret);
2639 2640
}

2641
static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
2642
				   struct ftrace_ops_hash *old_hash)
2643 2644
{
	ops->flags |= FTRACE_OPS_FL_MODIFYING;
2645 2646
	ops->old_hash.filter_hash = old_hash->filter_hash;
	ops->old_hash.notrace_hash = old_hash->notrace_hash;
2647
	ftrace_run_update_code(command);
2648
	ops->old_hash.filter_hash = NULL;
2649
	ops->old_hash.notrace_hash = NULL;
2650 2651 2652
	ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
}

2653
static ftrace_func_t saved_ftrace_func;
2654
static int ftrace_start_up;
2655

2656 2657 2658 2659
void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
}

2660
static void per_cpu_ops_free(struct ftrace_ops *ops)
2661 2662 2663 2664
{
	free_percpu(ops->disabled);
}

2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676
static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}
2677

2678 2679 2680 2681 2682 2683 2684
static void ftrace_startup_all(int command)
{
	update_all_ops = true;
	ftrace_startup_enable(command);
	update_all_ops = false;
}

2685
static int ftrace_startup(struct ftrace_ops *ops, int command)
2686
{
2687
	int ret;
2688

2689
	if (unlikely(ftrace_disabled))
2690
		return -ENODEV;
2691

2692 2693 2694 2695
	ret = __register_ftrace_function(ops);
	if (ret)
		return ret;

2696
	ftrace_start_up++;
2697

2698 2699 2700 2701 2702 2703 2704 2705 2706
	/*
	 * Note that ftrace probes uses this to start up
	 * and modify functions it will probe. But we still
	 * set the ADDING flag for modification, as probes
	 * do not have trampolines. If they add them in the
	 * future, then the probes will need to distinguish
	 * between adding and updating probes.
	 */
	ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
2707

2708 2709 2710 2711 2712 2713 2714 2715 2716
	ret = ftrace_hash_ipmodify_enable(ops);
	if (ret < 0) {
		/* Rollback registration process */
		__unregister_ftrace_function(ops);
		ftrace_start_up--;
		ops->flags &= ~FTRACE_OPS_FL_ENABLED;
		return ret;
	}

2717 2718
	if (ftrace_hash_rec_enable(ops, 1))
		command |= FTRACE_UPDATE_CALLS;
2719

2720
	ftrace_startup_enable(command);
2721

2722 2723
	ops->flags &= ~FTRACE_OPS_FL_ADDING;

2724
	return 0;
2725 2726
}

2727
static int ftrace_shutdown(struct ftrace_ops *ops, int command)
2728
{
2729
	int ret;
2730

2731
	if (unlikely(ftrace_disabled))
2732 2733 2734 2735 2736
		return -ENODEV;

	ret = __unregister_ftrace_function(ops);
	if (ret)
		return ret;
2737

2738
	ftrace_start_up--;
2739 2740 2741 2742 2743 2744 2745
	/*
	 * Just warn in case of unbalance, no need to kill ftrace, it's not
	 * critical but the ftrace_call callers may be never nopped again after
	 * further ftrace uses.
	 */
	WARN_ON_ONCE(ftrace_start_up < 0);

2746 2747
	/* Disabling ipmodify never fails */
	ftrace_hash_ipmodify_disable(ops);
2748

2749 2750
	if (ftrace_hash_rec_disable(ops, 1))
		command |= FTRACE_UPDATE_CALLS;
2751

2752
	ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2753

2754 2755 2756 2757
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}
2758

2759 2760
	if (!command || !ftrace_enabled) {
		/*
2761
		 * If these are per_cpu ops, they still need their
2762 2763 2764 2765
		 * per_cpu field freed. Since, function tracing is
		 * not currently active, we can just free them
		 * without synchronizing all CPUs.
		 */
2766 2767
		if (ops->flags & FTRACE_OPS_FL_PER_CPU)
			per_cpu_ops_free(ops);
2768
		return 0;
2769
	}
2770

2771 2772 2773 2774
	/*
	 * If the ops uses a trampoline, then it needs to be
	 * tested first on update.
	 */
2775
	ops->flags |= FTRACE_OPS_FL_REMOVING;
2776 2777
	removed_ops = ops;

2778 2779 2780 2781
	/* The trampoline logic checks the old hashes */
	ops->old_hash.filter_hash = ops->func_hash->filter_hash;
	ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;

2782
	ftrace_run_update_code(command);
2783

2784 2785 2786 2787 2788 2789 2790 2791 2792
	/*
	 * If there's no more ops registered with ftrace, run a
	 * sanity check to make sure all rec flags are cleared.
	 */
	if (ftrace_ops_list == &ftrace_list_end) {
		struct ftrace_page *pg;
		struct dyn_ftrace *rec;

		do_for_each_ftrace_rec(pg, rec) {
2793
			if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED))
2794 2795 2796 2797 2798
				pr_warn("  %pS flags:%lx\n",
					(void *)rec->ip, rec->flags);
		} while_for_each_ftrace_rec();
	}

2799 2800 2801 2802
	ops->old_hash.filter_hash = NULL;
	ops->old_hash.notrace_hash = NULL;

	removed_ops = NULL;
2803
	ops->flags &= ~FTRACE_OPS_FL_REMOVING;
2804

2805 2806 2807
	/*
	 * Dynamic ops may be freed, we must make sure that all
	 * callers are done before leaving this function.
2808
	 * The same goes for freeing the per_cpu data of the per_cpu
2809 2810 2811 2812 2813 2814 2815 2816 2817 2818
	 * ops.
	 *
	 * Again, normal synchronize_sched() is not good enough.
	 * We need to do a hard force of sched synchronization.
	 * This is because we use preempt_disable() to do RCU, but
	 * the function tracers can be called where RCU is not watching
	 * (like before user_exit()). We can not rely on the RCU
	 * infrastructure to do the synchronization, thus we must do it
	 * ourselves.
	 */
2819
	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU)) {
2820 2821
		schedule_on_each_cpu(ftrace_sync);

2822 2823
		arch_ftrace_trampoline_free(ops);

2824 2825
		if (ops->flags & FTRACE_OPS_FL_PER_CPU)
			per_cpu_ops_free(ops);
2826 2827
	}

2828
	return 0;
2829 2830
}

I
Ingo Molnar 已提交
2831
static void ftrace_startup_sysctl(void)
2832
{
2833 2834
	int command;

2835 2836 2837
	if (unlikely(ftrace_disabled))
		return;

2838 2839
	/* Force update next time */
	saved_ftrace_func = NULL;
2840
	/* ftrace_start_up is true if we want ftrace running */
2841 2842 2843 2844
	if (ftrace_start_up) {
		command = FTRACE_UPDATE_CALLS;
		if (ftrace_graph_active)
			command |= FTRACE_START_FUNC_RET;
2845
		ftrace_startup_enable(command);
2846
	}
2847 2848
}

I
Ingo Molnar 已提交
2849
static void ftrace_shutdown_sysctl(void)
2850
{
2851 2852
	int command;

2853 2854 2855
	if (unlikely(ftrace_disabled))
		return;

2856
	/* ftrace_start_up is true if ftrace is running */
2857 2858 2859 2860 2861 2862
	if (ftrace_start_up) {
		command = FTRACE_DISABLE_CALLS;
		if (ftrace_graph_active)
			command |= FTRACE_STOP_FUNC_RET;
		ftrace_run_update_code(command);
	}
2863 2864
}

2865
static u64		ftrace_update_time;
2866 2867
unsigned long		ftrace_update_tot_cnt;

2868
static inline int ops_traces_mod(struct ftrace_ops *ops)
2869
{
2870 2871 2872 2873
	/*
	 * Filter_hash being empty will default to trace module.
	 * But notrace hash requires a test of individual module functions.
	 */
2874 2875
	return ftrace_hash_empty(ops->func_hash->filter_hash) &&
		ftrace_hash_empty(ops->func_hash->notrace_hash);
2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891
}

/*
 * Check if the current ops references the record.
 *
 * If the ops traces all functions, then it was already accounted for.
 * If the ops does not trace the current record function, skip it.
 * If the ops ignores the function via notrace filter, skip it.
 */
static inline bool
ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	/* If ops isn't enabled, ignore it */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return 0;

2892
	/* If ops traces all then it includes this function */
2893
	if (ops_traces_mod(ops))
2894
		return 1;
2895 2896

	/* The function must be in the filter */
2897
	if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
2898
	    !__ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
2899
		return 0;
2900

2901
	/* If in notrace hash, we ignore it too */
2902
	if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
2903 2904 2905 2906 2907
		return 0;

	return 1;
}

2908
static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
2909
{
2910
	struct ftrace_page *pg;
2911
	struct dyn_ftrace *p;
2912
	u64 start, stop;
2913
	unsigned long update_cnt = 0;
2914
	unsigned long rec_flags = 0;
2915
	int i;
2916

2917 2918
	start = ftrace_now(raw_smp_processor_id());

2919
	/*
2920 2921 2922 2923 2924 2925 2926 2927 2928
	 * When a module is loaded, this function is called to convert
	 * the calls to mcount in its text to nops, and also to create
	 * an entry in the ftrace data. Now, if ftrace is activated
	 * after this call, but before the module sets its text to
	 * read-only, the modification of enabling ftrace can fail if
	 * the read-only is done while ftrace is converting the calls.
	 * To prevent this, the module's records are set as disabled
	 * and will be enabled after the call to set the module's text
	 * to read-only.
2929
	 */
2930 2931
	if (mod)
		rec_flags |= FTRACE_FL_DISABLED;
2932

2933
	for (pg = new_pgs; pg; pg = pg->next) {
2934

2935
		for (i = 0; i < pg->index; i++) {
2936

2937 2938 2939
			/* If something went wrong, bail without enabling anything */
			if (unlikely(ftrace_disabled))
				return -1;
2940

2941
			p = &pg->records[i];
2942
			p->flags = rec_flags;
2943

2944 2945 2946 2947 2948 2949
			/*
			 * Do the initial record conversion from mcount jump
			 * to the NOP instructions.
			 */
			if (!ftrace_code_disable(mod, p))
				break;
2950

2951
			update_cnt++;
2952
		}
2953 2954
	}

I
Ingo Molnar 已提交
2955
	stop = ftrace_now(raw_smp_processor_id());
2956
	ftrace_update_time = stop - start;
2957
	ftrace_update_tot_cnt += update_cnt;
2958

2959 2960 2961
	return 0;
}

2962
static int ftrace_allocate_records(struct ftrace_page *pg, int count)
2963
{
2964
	int order;
2965 2966
	int cnt;

2967 2968 2969 2970
	if (WARN_ON(!count))
		return -EINVAL;

	order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
2971 2972

	/*
2973 2974
	 * We want to fill as much as possible. No more than a page
	 * may be empty.
2975
	 */
2976 2977
	while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
		order--;
2978

2979 2980
 again:
	pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
2981

2982 2983 2984 2985 2986 2987 2988
	if (!pg->records) {
		/* if we can't allocate this size, try something smaller */
		if (!order)
			return -ENOMEM;
		order >>= 1;
		goto again;
	}
2989

2990 2991
	cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
	pg->size = cnt;
2992

2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025
	if (cnt > count)
		cnt = count;

	return cnt;
}

static struct ftrace_page *
ftrace_allocate_pages(unsigned long num_to_init)
{
	struct ftrace_page *start_pg;
	struct ftrace_page *pg;
	int order;
	int cnt;

	if (!num_to_init)
		return 0;

	start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
	if (!pg)
		return NULL;

	/*
	 * Try to allocate as much as possible in one continues
	 * location that fills in all of the space. We want to
	 * waste as little space as possible.
	 */
	for (;;) {
		cnt = ftrace_allocate_records(pg, num_to_init);
		if (cnt < 0)
			goto free_pages;

		num_to_init -= cnt;
		if (!num_to_init)
3026 3027
			break;

3028 3029 3030 3031
		pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
		if (!pg->next)
			goto free_pages;

3032 3033 3034
		pg = pg->next;
	}

3035 3036 3037
	return start_pg;

 free_pages:
3038 3039
	pg = start_pg;
	while (pg) {
3040 3041 3042 3043 3044 3045 3046 3047 3048 3049
		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
		free_pages((unsigned long)pg->records, order);
		start_pg = pg->next;
		kfree(pg);
		pg = start_pg;
	}
	pr_info("ftrace: FAILED to allocate memory for functions\n");
	return NULL;
}

3050 3051 3052
#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
3053
	loff_t				pos;
3054 3055 3056 3057 3058
	loff_t				func_pos;
	struct ftrace_page		*pg;
	struct dyn_ftrace		*func;
	struct ftrace_func_probe	*probe;
	struct trace_parser		parser;
3059
	struct ftrace_hash		*hash;
3060
	struct ftrace_ops		*ops;
3061 3062 3063
	int				hidx;
	int				idx;
	unsigned			flags;
3064 3065
};

3066
static void *
3067
t_hash_next(struct seq_file *m, loff_t *pos)
3068 3069
{
	struct ftrace_iterator *iter = m->private;
3070
	struct hlist_node *hnd = NULL;
3071 3072 3073
	struct hlist_head *hhd;

	(*pos)++;
3074
	iter->pos = *pos;
3075

3076 3077
	if (iter->probe)
		hnd = &iter->probe->node;
3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099
 retry:
	if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
		return NULL;

	hhd = &ftrace_func_hash[iter->hidx];

	if (hlist_empty(hhd)) {
		iter->hidx++;
		hnd = NULL;
		goto retry;
	}

	if (!hnd)
		hnd = hhd->first;
	else {
		hnd = hnd->next;
		if (!hnd) {
			iter->hidx++;
			goto retry;
		}
	}

3100 3101 3102 3103 3104 3105
	if (WARN_ON_ONCE(!hnd))
		return NULL;

	iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);

	return iter;
3106 3107 3108 3109 3110 3111
}

static void *t_hash_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
L
Li Zefan 已提交
3112 3113
	loff_t l;

3114 3115 3116
	if (!(iter->flags & FTRACE_ITER_DO_HASH))
		return NULL;

3117 3118
	if (iter->func_pos > *pos)
		return NULL;
3119

L
Li Zefan 已提交
3120
	iter->hidx = 0;
3121
	for (l = 0; l <= (*pos - iter->func_pos); ) {
3122
		p = t_hash_next(m, &l);
L
Li Zefan 已提交
3123 3124 3125
		if (!p)
			break;
	}
3126 3127 3128
	if (!p)
		return NULL;

3129 3130 3131
	/* Only set this if we have an item */
	iter->flags |= FTRACE_ITER_HASH;

3132
	return iter;
3133 3134
}

3135 3136
static int
t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
3137
{
S
Steven Rostedt 已提交
3138
	struct ftrace_func_probe *rec;
3139

3140 3141 3142
	rec = iter->probe;
	if (WARN_ON_ONCE(!rec))
		return -EIO;
3143

3144 3145 3146
	if (rec->ops->print)
		return rec->ops->print(m, rec->ip, rec->ops, rec->data);

3147
	seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
3148 3149 3150 3151 3152 3153 3154 3155

	if (rec->data)
		seq_printf(m, ":%p", rec->data);
	seq_putc(m, '\n');

	return 0;
}

I
Ingo Molnar 已提交
3156
static void *
3157 3158 3159
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
3160
	struct ftrace_ops *ops = iter->ops;
3161 3162
	struct dyn_ftrace *rec = NULL;

3163 3164 3165
	if (unlikely(ftrace_disabled))
		return NULL;

3166
	if (iter->flags & FTRACE_ITER_HASH)
3167
		return t_hash_next(m, pos);
3168

3169
	(*pos)++;
3170
	iter->pos = iter->func_pos = *pos;
3171

3172
	if (iter->flags & FTRACE_ITER_PRINTALL)
3173
		return t_hash_start(m, pos);
3174

3175 3176 3177 3178 3179 3180 3181 3182 3183
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
3184
		if (((iter->flags & FTRACE_ITER_FILTER) &&
3185
		     !(ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))) ||
S
Steven Rostedt 已提交
3186

3187
		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
3188
		     !ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) ||
3189 3190

		    ((iter->flags & FTRACE_ITER_ENABLED) &&
3191
		     !(rec->flags & FTRACE_FL_ENABLED))) {
3192

3193 3194 3195 3196 3197
			rec = NULL;
			goto retry;
		}
	}

3198
	if (!rec)
3199
		return t_hash_start(m, pos);
3200 3201 3202 3203

	iter->func = rec;

	return iter;
3204 3205
}

3206 3207 3208 3209
static void reset_iter_read(struct ftrace_iterator *iter)
{
	iter->pos = 0;
	iter->func_pos = 0;
3210
	iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
3211 3212 3213 3214 3215
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
3216
	struct ftrace_ops *ops = iter->ops;
3217
	void *p = NULL;
3218
	loff_t l;
3219

3220
	mutex_lock(&ftrace_lock);
3221 3222 3223 3224

	if (unlikely(ftrace_disabled))
		return NULL;

3225 3226 3227 3228 3229 3230
	/*
	 * If an lseek was done, then reset and start from beginning.
	 */
	if (*pos < iter->pos)
		reset_iter_read(iter);

3231 3232 3233 3234 3235
	/*
	 * For set_ftrace_filter reading, if we have the filter
	 * off, we can short cut and just print out that all
	 * functions are enabled.
	 */
3236
	if ((iter->flags & FTRACE_ITER_FILTER &&
3237
	     ftrace_hash_empty(ops->func_hash->filter_hash)) ||
3238
	    (iter->flags & FTRACE_ITER_NOTRACE &&
3239
	     ftrace_hash_empty(ops->func_hash->notrace_hash))) {
3240
		if (*pos > 0)
3241
			return t_hash_start(m, pos);
3242
		iter->flags |= FTRACE_ITER_PRINTALL;
3243 3244
		/* reset in case of seek/pread */
		iter->flags &= ~FTRACE_ITER_HASH;
3245 3246 3247
		return iter;
	}

3248 3249 3250
	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_start(m, pos);

3251 3252 3253 3254 3255
	/*
	 * Unfortunately, we need to restart at ftrace_pages_start
	 * every time we let go of the ftrace_mutex. This is because
	 * those pointers can change without the lock.
	 */
3256 3257 3258 3259 3260 3261
	iter->pg = ftrace_pages_start;
	iter->idx = 0;
	for (l = 0; l <= *pos; ) {
		p = t_next(m, p, &l);
		if (!p)
			break;
3262
	}
3263

3264 3265
	if (!p)
		return t_hash_start(m, pos);
3266 3267

	return iter;
3268 3269 3270 3271
}

static void t_stop(struct seq_file *m, void *p)
{
3272
	mutex_unlock(&ftrace_lock);
3273 3274
}

3275 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290
void * __weak
arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	return NULL;
}

static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
				struct dyn_ftrace *rec)
{
	void *ptr;

	ptr = arch_ftrace_trampoline_func(ops, rec);
	if (ptr)
		seq_printf(m, " ->%pS", ptr);
}

3291 3292
static int t_show(struct seq_file *m, void *v)
{
3293
	struct ftrace_iterator *iter = m->private;
3294
	struct dyn_ftrace *rec;
3295

3296
	if (iter->flags & FTRACE_ITER_HASH)
3297
		return t_hash_show(m, iter);
3298

3299
	if (iter->flags & FTRACE_ITER_PRINTALL) {
3300
		if (iter->flags & FTRACE_ITER_NOTRACE)
3301
			seq_puts(m, "#### no functions disabled ####\n");
3302
		else
3303
			seq_puts(m, "#### all functions enabled ####\n");
3304 3305 3306
		return 0;
	}

3307 3308
	rec = iter->func;

3309 3310 3311
	if (!rec)
		return 0;

3312
	seq_printf(m, "%ps", (void *)rec->ip);
3313
	if (iter->flags & FTRACE_ITER_ENABLED) {
3314
		struct ftrace_ops *ops;
3315

3316
		seq_printf(m, " (%ld)%s%s",
3317
			   ftrace_rec_count(rec),
3318 3319
			   rec->flags & FTRACE_FL_REGS ? " R" : "  ",
			   rec->flags & FTRACE_FL_IPMODIFY ? " I" : "  ");
3320
		if (rec->flags & FTRACE_FL_TRAMP_EN) {
3321
			ops = ftrace_find_tramp_ops_any(rec);
3322 3323 3324 3325 3326
			if (ops) {
				do {
					seq_printf(m, "\ttramp: %pS (%pS)",
						   (void *)ops->trampoline,
						   (void *)ops->func);
3327
					add_trampoline_func(m, ops, rec);
3328 3329 3330
					ops = ftrace_find_tramp_ops_next(rec, ops);
				} while (ops);
			} else
3331
				seq_puts(m, "\ttramp: ERROR!");
3332 3333
		} else {
			add_trampoline_func(m, NULL, rec);
3334 3335 3336
		}
	}	

3337
	seq_putc(m, '\n');
3338 3339 3340 3341

	return 0;
}

J
James Morris 已提交
3342
static const struct seq_operations show_ftrace_seq_ops = {
3343 3344 3345 3346 3347 3348
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

I
Ingo Molnar 已提交
3349
static int
3350 3351 3352 3353
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;

3354 3355 3356
	if (unlikely(ftrace_disabled))
		return -ENODEV;

3357 3358 3359 3360
	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
	if (iter) {
		iter->pg = ftrace_pages_start;
		iter->ops = &global_ops;
I
Ingo Molnar 已提交
3361
	}
3362

3363
	return iter ? 0 : -ENOMEM;
3364 3365
}

3366 3367 3368 3369 3370
static int
ftrace_enabled_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;

3371 3372 3373 3374 3375
	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
	if (iter) {
		iter->pg = ftrace_pages_start;
		iter->flags = FTRACE_ITER_ENABLED;
		iter->ops = &global_ops;
3376 3377
	}

3378
	return iter ? 0 : -ENOMEM;
3379 3380
}

3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393
/**
 * ftrace_regex_open - initialize function tracer filter files
 * @ops: The ftrace_ops that hold the hash filters
 * @flag: The type of filter to process
 * @inode: The inode, usually passed in to your open routine
 * @file: The file, usually passed in to your open routine
 *
 * ftrace_regex_open() initializes the filter files for the
 * @ops. Depending on @flag it may process the filter hash or
 * the notrace hash of @ops. With this called from the open
 * routine, you can use ftrace_filter_write() for the write
 * routine if @flag has FTRACE_ITER_FILTER set, or
 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
3394
 * tracing_lseek() should be used as the lseek routine, and
3395 3396 3397
 * release must call ftrace_regex_release().
 */
int
3398
ftrace_regex_open(struct ftrace_ops *ops, int flag,
3399
		  struct inode *inode, struct file *file)
3400 3401
{
	struct ftrace_iterator *iter;
3402
	struct ftrace_hash *hash;
3403 3404
	int ret = 0;

3405 3406
	ftrace_ops_init(ops);

3407 3408 3409
	if (unlikely(ftrace_disabled))
		return -ENODEV;

3410 3411 3412 3413
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

3414 3415 3416 3417 3418
	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
		kfree(iter);
		return -ENOMEM;
	}

3419 3420 3421
	iter->ops = ops;
	iter->flags = flag;

3422
	mutex_lock(&ops->func_hash->regex_lock);
3423

3424
	if (flag & FTRACE_ITER_NOTRACE)
3425
		hash = ops->func_hash->notrace_hash;
3426
	else
3427
		hash = ops->func_hash->filter_hash;
3428

3429
	if (file->f_mode & FMODE_WRITE) {
3430 3431 3432 3433 3434 3435 3436
		const int size_bits = FTRACE_HASH_DEFAULT_BITS;

		if (file->f_flags & O_TRUNC)
			iter->hash = alloc_ftrace_hash(size_bits);
		else
			iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);

3437 3438 3439
		if (!iter->hash) {
			trace_parser_put(&iter->parser);
			kfree(iter);
3440 3441
			ret = -ENOMEM;
			goto out_unlock;
3442 3443
		}
	}
3444

3445 3446 3447 3448 3449 3450 3451
	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
3452
		} else {
3453 3454
			/* Failed */
			free_ftrace_hash(iter->hash);
3455
			trace_parser_put(&iter->parser);
3456
			kfree(iter);
3457
		}
3458 3459
	} else
		file->private_data = iter;
3460 3461

 out_unlock:
3462
	mutex_unlock(&ops->func_hash->regex_lock);
3463 3464 3465 3466

	return ret;
}

3467 3468 3469
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
3470 3471 3472
	struct ftrace_ops *ops = inode->i_private;

	return ftrace_regex_open(ops,
3473 3474
			FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
			inode, file);
3475 3476 3477 3478 3479
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
3480 3481 3482
	struct ftrace_ops *ops = inode->i_private;

	return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
3483
				 inode, file);
3484 3485
}

3486 3487 3488 3489 3490 3491 3492
/* Type for quick search ftrace basic regexes (globs) from filter_parse_regex */
struct ftrace_glob {
	char *search;
	unsigned len;
	int type;
};

3493 3494 3495 3496 3497 3498 3499 3500 3501 3502
/*
 * If symbols in an architecture don't correspond exactly to the user-visible
 * name of what they represent, it is possible to define this function to
 * perform the necessary adjustments.
*/
char * __weak arch_ftrace_match_adjust(char *str, const char *search)
{
	return str;
}

3503
static int ftrace_match(char *str, struct ftrace_glob *g)
3504 3505
{
	int matched = 0;
3506
	int slen;
3507

3508 3509
	str = arch_ftrace_match_adjust(str, g->search);

3510
	switch (g->type) {
3511
	case MATCH_FULL:
3512
		if (strcmp(str, g->search) == 0)
3513 3514 3515
			matched = 1;
		break;
	case MATCH_FRONT_ONLY:
3516
		if (strncmp(str, g->search, g->len) == 0)
3517 3518 3519
			matched = 1;
		break;
	case MATCH_MIDDLE_ONLY:
3520
		if (strstr(str, g->search))
3521 3522 3523
			matched = 1;
		break;
	case MATCH_END_ONLY:
3524
		slen = strlen(str);
3525 3526
		if (slen >= g->len &&
		    memcmp(str + slen - g->len, g->search, g->len) == 0)
3527 3528
			matched = 1;
		break;
3529 3530 3531 3532
	case MATCH_GLOB:
		if (glob_match(g->search, str))
			matched = 1;
		break;
3533 3534 3535 3536 3537
	}

	return matched;
}

3538
static int
3539
enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter)
3540
{
3541 3542 3543
	struct ftrace_func_entry *entry;
	int ret = 0;

3544
	entry = ftrace_lookup_ip(hash, rec->ip);
3545
	if (clear_filter) {
3546 3547 3548
		/* Do nothing if it doesn't exist */
		if (!entry)
			return 0;
3549

3550
		free_hash_entry(hash, entry);
3551 3552 3553 3554
	} else {
		/* Do nothing if it exists */
		if (entry)
			return 0;
3555

3556
		ret = add_hash_entry(hash, rec->ip);
3557 3558
	}
	return ret;
3559 3560
}

3561
static int
D
Dmitry Safonov 已提交
3562 3563
ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
		struct ftrace_glob *mod_g, int exclude_mod)
3564 3565
{
	char str[KSYM_SYMBOL_LEN];
3566 3567 3568 3569
	char *modname;

	kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);

D
Dmitry Safonov 已提交
3570 3571 3572 3573 3574 3575 3576 3577 3578 3579 3580 3581 3582 3583 3584 3585 3586 3587 3588 3589
	if (mod_g) {
		int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;

		/* blank module name to match all modules */
		if (!mod_g->len) {
			/* blank module globbing: modname xor exclude_mod */
			if ((!exclude_mod) != (!modname))
				goto func_match;
			return 0;
		}

		/* not matching the module */
		if (!modname || !mod_matches) {
			if (exclude_mod)
				goto func_match;
			else
				return 0;
		}

		if (mod_matches && exclude_mod)
3590 3591
			return 0;

D
Dmitry Safonov 已提交
3592
func_match:
3593
		/* blank search means to match all funcs in the mod */
3594
		if (!func_g->len)
3595 3596
			return 1;
	}
3597

3598
	return ftrace_match(str, func_g);
3599 3600
}

3601
static int
3602
match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
3603 3604 3605
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
3606
	struct ftrace_glob func_g = { .type = MATCH_FULL };
D
Dmitry Safonov 已提交
3607 3608 3609
	struct ftrace_glob mod_g = { .type = MATCH_FULL };
	struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL;
	int exclude_mod = 0;
3610
	int found = 0;
3611
	int ret;
3612
	int clear_filter;
3613

D
Dmitry Safonov 已提交
3614
	if (func) {
3615 3616 3617
		func_g.type = filter_parse_regex(func, len, &func_g.search,
						 &clear_filter);
		func_g.len = strlen(func_g.search);
3618
	}
3619

D
Dmitry Safonov 已提交
3620 3621 3622 3623
	if (mod) {
		mod_g.type = filter_parse_regex(mod, strlen(mod),
				&mod_g.search, &exclude_mod);
		mod_g.len = strlen(mod_g.search);
3624
	}
3625

3626
	mutex_lock(&ftrace_lock);
3627

3628 3629
	if (unlikely(ftrace_disabled))
		goto out_unlock;
3630

3631
	do_for_each_ftrace_rec(pg, rec) {
3632 3633 3634 3635

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

D
Dmitry Safonov 已提交
3636
		if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
3637
			ret = enter_record(hash, rec, clear_filter);
3638 3639 3640 3641
			if (ret < 0) {
				found = ret;
				goto out_unlock;
			}
3642
			found = 1;
3643 3644
		}
	} while_for_each_ftrace_rec();
3645
 out_unlock:
3646
	mutex_unlock(&ftrace_lock);
3647 3648

	return found;
3649 3650
}

3651
static int
3652
ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
3653
{
3654
	return match_records(hash, buff, len, NULL);
3655 3656 3657
}


3658 3659 3660 3661 3662 3663
/*
 * We register the module command as a template to show others how
 * to register the a command as well.
 */

static int
3664
ftrace_mod_callback(struct ftrace_hash *hash,
3665
		    char *func, char *cmd, char *module, int enable)
3666
{
3667
	int ret;
3668 3669 3670 3671 3672 3673 3674 3675

	/*
	 * cmd == 'mod' because we only registered this func
	 * for the 'mod' ftrace_func_command.
	 * But if you register one func with multiple commands,
	 * you can tell which command was used by the cmd
	 * parameter.
	 */
3676
	ret = match_records(hash, func, strlen(func), module);
3677
	if (!ret)
3678
		return -EINVAL;
3679 3680 3681
	if (ret < 0)
		return ret;
	return 0;
3682 3683 3684 3685 3686 3687 3688 3689 3690 3691 3692
}

static struct ftrace_func_command ftrace_mod_cmd = {
	.name			= "mod",
	.func			= ftrace_mod_callback,
};

static int __init ftrace_mod_cmd_init(void)
{
	return register_ftrace_command(&ftrace_mod_cmd);
}
3693
core_initcall(ftrace_mod_cmd_init);
3694

3695
static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
3696
				      struct ftrace_ops *op, struct pt_regs *pt_regs)
3697
{
S
Steven Rostedt 已提交
3698
	struct ftrace_func_probe *entry;
3699 3700 3701 3702 3703 3704 3705 3706 3707 3708 3709 3710 3711 3712 3713
	struct hlist_head *hhd;
	unsigned long key;

	key = hash_long(ip, FTRACE_HASH_BITS);

	hhd = &ftrace_func_hash[key];

	if (hlist_empty(hhd))
		return;

	/*
	 * Disable preemption for these calls to prevent a RCU grace
	 * period. This syncs the hash iteration and freeing of items
	 * on the hash. rcu_read_lock is too dangerous here.
	 */
3714
	preempt_disable_notrace();
3715
	hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
3716 3717 3718
		if (entry->ip == ip)
			entry->ops->func(ip, parent_ip, &entry->data);
	}
3719
	preempt_enable_notrace();
3720 3721
}

S
Steven Rostedt 已提交
3722
static struct ftrace_ops trace_probe_ops __read_mostly =
3723
{
3724
	.func		= function_trace_probe_call,
3725
	.flags		= FTRACE_OPS_FL_INITIALIZED,
3726
	INIT_OPS_HASH(trace_probe_ops)
3727 3728
};

S
Steven Rostedt 已提交
3729
static int ftrace_probe_registered;
3730

3731
static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash)
3732
{
3733
	int ret;
3734 3735
	int i;

3736 3737 3738
	if (ftrace_probe_registered) {
		/* still need to update the function call sites */
		if (ftrace_enabled)
3739 3740
			ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
					       old_hash);
3741
		return;
3742
	}
3743 3744 3745 3746 3747 3748 3749 3750 3751 3752

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			break;
	}
	/* Nothing registered? */
	if (i == FTRACE_FUNC_HASHSIZE)
		return;

3753
	ret = ftrace_startup(&trace_probe_ops, 0);
3754

S
Steven Rostedt 已提交
3755
	ftrace_probe_registered = 1;
3756 3757
}

3758
static bool __disable_ftrace_function_probe(void)
3759 3760 3761
{
	int i;

S
Steven Rostedt 已提交
3762
	if (!ftrace_probe_registered)
3763
		return false;
3764 3765 3766 3767

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
3768
			return false;
3769 3770 3771
	}

	/* no more funcs left */
3772
	ftrace_shutdown(&trace_probe_ops, 0);
3773

S
Steven Rostedt 已提交
3774
	ftrace_probe_registered = 0;
3775
	return true;
3776 3777 3778
}


3779
static void ftrace_free_entry(struct ftrace_func_probe *entry)
3780 3781
{
	if (entry->ops->free)
3782
		entry->ops->free(entry->ops, entry->ip, &entry->data);
3783 3784 3785 3786
	kfree(entry);
}

int
S
Steven Rostedt 已提交
3787
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3788 3789
			      void *data)
{
3790
	struct ftrace_ops_hash old_hash_ops;
S
Steven Rostedt 已提交
3791
	struct ftrace_func_probe *entry;
3792
	struct ftrace_glob func_g;
3793
	struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
3794
	struct ftrace_hash *old_hash = *orig_hash;
3795
	struct ftrace_hash *hash;
3796 3797
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
3798
	int not;
S
Steven Rostedt 已提交
3799
	unsigned long key;
3800
	int count = 0;
3801
	int ret;
3802

3803 3804 3805
	func_g.type = filter_parse_regex(glob, strlen(glob),
			&func_g.search, &not);
	func_g.len = strlen(func_g.search);
3806

S
Steven Rostedt 已提交
3807
	/* we do not support '!' for function probes */
3808 3809 3810
	if (WARN_ON(not))
		return -EINVAL;

3811
	mutex_lock(&trace_probe_ops.func_hash->regex_lock);
3812

3813 3814 3815 3816
	old_hash_ops.filter_hash = old_hash;
	/* Probes only have filters */
	old_hash_ops.notrace_hash = NULL;

3817
	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
3818 3819
	if (!hash) {
		count = -ENOMEM;
3820
		goto out;
3821 3822 3823 3824
	}

	if (unlikely(ftrace_disabled)) {
		count = -ENODEV;
3825
		goto out;
3826
	}
3827

3828 3829
	mutex_lock(&ftrace_lock);

3830
	do_for_each_ftrace_rec(pg, rec) {
3831

3832 3833 3834
		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

D
Dmitry Safonov 已提交
3835
		if (!ftrace_match_record(rec, &func_g, NULL, 0))
3836 3837 3838 3839
			continue;

		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
S
Steven Rostedt 已提交
3840
			/* If we did not process any, then return error */
3841 3842 3843 3844 3845 3846 3847 3848 3849 3850 3851 3852 3853 3854
			if (!count)
				count = -ENOMEM;
			goto out_unlock;
		}

		count++;

		entry->data = data;

		/*
		 * The caller might want to do something special
		 * for each function we find. We call the callback
		 * to give the caller an opportunity to do so.
		 */
3855 3856
		if (ops->init) {
			if (ops->init(ops, rec->ip, &entry->data) < 0) {
3857 3858 3859 3860 3861 3862
				/* caller does not like this func */
				kfree(entry);
				continue;
			}
		}

3863 3864 3865 3866 3867 3868 3869
		ret = enter_record(hash, rec, 0);
		if (ret < 0) {
			kfree(entry);
			count = ret;
			goto out_unlock;
		}

3870 3871 3872 3873 3874 3875 3876
		entry->ops = ops;
		entry->ip = rec->ip;

		key = hash_long(entry->ip, FTRACE_HASH_BITS);
		hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);

	} while_for_each_ftrace_rec();
3877 3878

	ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3879

3880
	__enable_ftrace_function_probe(&old_hash_ops);
3881

3882 3883 3884
	if (!ret)
		free_ftrace_hash_rcu(old_hash);
	else
3885 3886
		count = ret;

3887
 out_unlock:
3888 3889
	mutex_unlock(&ftrace_lock);
 out:
3890
	mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
3891
	free_ftrace_hash(hash);
3892 3893 3894 3895 3896

	return count;
}

enum {
S
Steven Rostedt 已提交
3897 3898
	PROBE_TEST_FUNC		= 1,
	PROBE_TEST_DATA		= 2
3899 3900 3901
};

static void
__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				  void *data, int flags)
{
	struct ftrace_ops_hash old_hash_ops;
	struct ftrace_func_entry *rec_entry;
	struct ftrace_func_probe *entry;
	struct ftrace_func_probe *p;
	struct ftrace_glob func_g;
	struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
	struct ftrace_hash *old_hash = *orig_hash;
	struct list_head free_list;
	struct ftrace_hash *hash;
	struct hlist_node *tmp;
	char str[KSYM_SYMBOL_LEN];
	int i, ret;
	bool disabled;

	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
		func_g.search = NULL;
	else if (glob) {
		int not;

		func_g.type = filter_parse_regex(glob, strlen(glob),
						 &func_g.search, &not);
		func_g.len = strlen(func_g.search);
		func_g.search = glob;

		/* we do not support '!' for function probes */
		if (WARN_ON(not))
			return;
	}

	mutex_lock(&trace_probe_ops.func_hash->regex_lock);

	old_hash_ops.filter_hash = old_hash;
	/* Probes only have filters */
	old_hash_ops.notrace_hash = NULL;

	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
	if (!hash)
		/* Hmm, should report this somehow */
		goto out_unlock;

	INIT_LIST_HEAD(&free_list);

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];

		hlist_for_each_entry_safe(entry, tmp, hhd, node) {

			/* break up if statements for readability */
			if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
				continue;

			if ((flags & PROBE_TEST_DATA) && entry->data != data)
				continue;

			/* do this last, since it is the most expensive */
			if (func_g.search) {
				kallsyms_lookup(entry->ip, NULL, NULL,
						NULL, str);
				if (!ftrace_match(str, &func_g))
					continue;
			}

			rec_entry = ftrace_lookup_ip(hash, entry->ip);
			/* It is possible more than one entry had this ip */
			if (rec_entry)
				free_hash_entry(hash, rec_entry);

			hlist_del_rcu(&entry->node);
			list_add(&entry->free_list, &free_list);
		}
	}
	mutex_lock(&ftrace_lock);
	disabled = __disable_ftrace_function_probe();
	/*
	 * Remove after the disable is called. Otherwise, if the last
	 * probe is removed, a null hash means *all enabled*.
	 */
	ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);

	/* still need to update the function call sites */
	if (ftrace_enabled && !disabled)
		ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
				       &old_hash_ops);
	synchronize_sched();
	if (!ret)
		free_ftrace_hash_rcu(old_hash);

	list_for_each_entry_safe(entry, p, &free_list, free_list) {
		list_del(&entry->free_list);
		ftrace_free_entry(entry);
	}
	mutex_unlock(&ftrace_lock);

 out_unlock:
	mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
	free_ftrace_hash(hash);
}

void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				void *data)
{
	__unregister_ftrace_function_probe(glob, ops, data,
					  PROBE_TEST_FUNC | PROBE_TEST_DATA);
}

void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
{
	__unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
}

void unregister_ftrace_function_probe_all(char *glob)
{
	__unregister_ftrace_function_probe(glob, NULL, NULL, 0);
}

static LIST_HEAD(ftrace_commands);
static DEFINE_MUTEX(ftrace_cmd_mutex);

/*
 * Currently we only register ftrace commands from __init, so mark this
 * __init too.
 */
__init int register_ftrace_command(struct ftrace_func_command *cmd)
{
	struct ftrace_func_command *p;
	int ret = 0;

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}
	list_add(&cmd->list, &ftrace_commands);
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}

/*
 * Currently we only unregister ftrace commands from __init, so mark
 * this __init too.
 */
__init int unregister_ftrace_command(struct ftrace_func_command *cmd)
{
	struct ftrace_func_command *p, *n;
	int ret = -ENODEV;

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry_safe(p, n, &ftrace_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = 0;
			list_del_init(&p->list);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}
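
/*
 * Illustrative sketch (not part of the original file): how a new
 * set_ftrace_filter command is typically wired up with
 * register_ftrace_command(). The command name "mycmd" and the callback
 * my_cmd_func() are assumptions made for this example; the callback
 * signature follows struct ftrace_func_command as used by the commands
 * registered elsewhere in this file.
 *
 *	static int my_cmd_func(struct ftrace_hash *hash, char *func,
 *			       char *cmd, char *param, int enable)
 *	{
 *		// Invoked when "<func>:mycmd[:<param>]" is written to
 *		// set_ftrace_filter (enable=1) or set_ftrace_notrace (enable=0).
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command my_cmd = {
 *		.name	= "mycmd",
 *		.func	= my_cmd_func,
 *	};
 *
 *	// register_ftrace_command() is __init-only here, so call it from
 *	// an __init function:
 *	static int __init my_cmd_init(void)
 *	{
 *		return register_ftrace_command(&my_cmd);
 *	}
 */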

static int ftrace_process_regex(struct ftrace_hash *hash,
				char *buff, int len, int enable)
4073
{
4074
	char *func, *command, *next = buff;
	struct ftrace_func_command *p;
4076
	int ret = -EINVAL;
4077 4078 4079 4080

	func = strsep(&next, ":");

	if (!next) {
4081
		ret = ftrace_match_records(hash, func, len);
4082 4083 4084 4085 4086
		if (!ret)
			ret = -EINVAL;
		if (ret < 0)
			return ret;
		return 0;
4087 4088
	}

4089
	/* command found */
4090 4091 4092

	command = strsep(&next, ":");

4093 4094 4095
	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(p->name, command) == 0) {
4096
			ret = p->func(hash, func, command, next, enable);
4097 4098
			goto out_unlock;
		}
4099
	}
4100 4101
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);
4102

4103
	return ret;
4104 4105
}

static ssize_t
4107 4108
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
4109 4110
{
	struct ftrace_iterator *iter;
4111 4112
	struct trace_parser *parser;
	ssize_t ret, read;
4113

4114
	if (!cnt)
4115 4116 4117 4118 4119 4120 4121 4122
		return 0;

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

4123
	if (unlikely(ftrace_disabled))
4124 4125 4126
		return -ENODEV;

	/* iter->hash is a local copy, so we don't need regex_lock */
4127

4128 4129
	parser = &iter->parser;
	read = trace_get_user(parser, ubuf, cnt, ppos);
4130

4131
	if (read >= 0 && trace_parser_loaded(parser) &&
4132
	    !trace_parser_cont(parser)) {
4133
		ret = ftrace_process_regex(iter->hash, parser->buffer,
4134
					   parser->idx, enable);
4135
		trace_parser_clear(parser);
4136
		if (ret < 0)
4137
			goto out;
4138
	}
4139 4140

	ret = read;
4141
 out:
4142 4143 4144
	return ret;
}

4145
ssize_t
4146 4147 4148 4149 4150 4151
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

4152
ssize_t
4153 4154 4155 4156 4157 4158
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

4159
static int
4160 4161 4162 4163 4164 4165 4166 4167 4168 4169 4170 4171 4172 4173 4174 4175 4176 4177
ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
{
	struct ftrace_func_entry *entry;

	if (!ftrace_location(ip))
		return -EINVAL;

	if (remove) {
		entry = ftrace_lookup_ip(hash, ip);
		if (!entry)
			return -ENOENT;
		free_hash_entry(hash, entry);
		return 0;
	}

	return add_hash_entry(hash, ip);
}

4178
static void ftrace_ops_update_code(struct ftrace_ops *ops,
4179
				   struct ftrace_ops_hash *old_hash)
4180
{
4181 4182 4183 4184 4185 4186
	struct ftrace_ops *op;

	if (!ftrace_enabled)
		return;

	if (ops->flags & FTRACE_OPS_FL_ENABLED) {
4187
		ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
4188 4189 4190 4191 4192 4193 4194 4195 4196 4197 4198 4199 4200 4201 4202 4203 4204 4205 4206
		return;
	}

	/*
	 * If this is the shared global_ops filter, then we need to
	 * check if there is another ops that shares it, is enabled.
	 * If so, we still need to run the modify code.
	 */
	if (ops->func_hash != &global_ops.local_hash)
		return;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op->func_hash == &global_ops.local_hash &&
		    op->flags & FTRACE_OPS_FL_ENABLED) {
			ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
			/* Only need to do this once */
			return;
		}
	} while_for_each_ftrace_op(op);
4207 4208
}

static int
ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
		unsigned long ip, int remove, int reset, int enable)
{
	struct ftrace_hash **orig_hash;
	struct ftrace_ops_hash old_hash_ops;
	struct ftrace_hash *old_hash;
	struct ftrace_hash *hash;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ops->func_hash->regex_lock);

	if (enable)
		orig_hash = &ops->func_hash->filter_hash;
	else
		orig_hash = &ops->func_hash->notrace_hash;

	if (reset)
		hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
	else
		hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);

	if (!hash) {
		ret = -ENOMEM;
		goto out_regex_unlock;
	}

	if (buf && !ftrace_match_records(hash, buf, len)) {
		ret = -EINVAL;
		goto out_regex_unlock;
	}
	if (ip) {
		ret = ftrace_match_addr(hash, ip, remove);
		if (ret < 0)
			goto out_regex_unlock;
	}

	mutex_lock(&ftrace_lock);
	old_hash = *orig_hash;
	old_hash_ops.filter_hash = ops->func_hash->filter_hash;
	old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
	if (!ret) {
		ftrace_ops_update_code(ops, &old_hash_ops);
		free_ftrace_hash_rcu(old_hash);
	}
	mutex_unlock(&ftrace_lock);

 out_regex_unlock:
	mutex_unlock(&ops->func_hash->regex_lock);

	free_ftrace_hash(hash);
	return ret;
}

4267 4268 4269 4270 4271 4272 4273 4274 4275 4276 4277 4278 4279 4280 4281 4282 4283 4284 4285 4286
static int
ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
		int reset, int enable)
{
	return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
}

/**
 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
 * @ops - the ops to set the filter with
 * @ip - the address to add to or remove from the filter.
 * @remove - non zero to remove the ip from the filter
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled
 * If @ip is NULL, it fails to update filter.
 */
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
			 int remove, int reset)
{
	ftrace_ops_init(ops);
	return ftrace_set_addr(ops, ip, remove, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
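
/*
 * Illustrative sketch (not part of the original file): filtering a single
 * function by address. "my_ops" and the use of kallsyms_lookup_name() to
 * obtain the address are assumptions for this example; any address that
 * ftrace_location() recognizes as a traced call site works.
 *
 *	unsigned long ip = kallsyms_lookup_name("schedule");
 *
 *	ret = ftrace_set_filter_ip(&my_ops, ip, 0, 0);	// add ip to the filter
 *	...
 *	ret = ftrace_set_filter_ip(&my_ops, ip, 1, 0);	// remove it again
 */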

/**
 * ftrace_ops_set_global_filter - setup ops to use global filters
 * @ops - the ops which will use the global filters
 *
 * ftrace users who need global function trace filtering should call this.
 * It can set the global filter only if ops were not initialized before.
 */
void ftrace_ops_set_global_filter(struct ftrace_ops *ops)
{
	if (ops->flags & FTRACE_OPS_FL_INITIALIZED)
		return;

	ftrace_ops_init(ops);
	ops->func_hash = &global_ops.local_hash;
}
EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter);
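
/*
 * Illustrative sketch (not part of the original file): an ops that wants to
 * honor the filters written to the global set_ftrace_filter file calls this
 * once before registering ("my_ops" is an assumed, not-yet-initialized ops):
 *
 *	ftrace_ops_set_global_filter(&my_ops);
 *	register_ftrace_function(&my_ops);
 */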

static int
ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
		 int reset, int enable)
{
	return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @ops - the ops to set the filter with
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset)
{
	ftrace_ops_init(ops);
	return ftrace_set_regex(ops, buf, len, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_filter);
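
/*
 * Illustrative sketch (not part of the original file): restricting an ops to
 * a set of functions by name before registering it. "my_ops" and the chosen
 * globs are assumptions for this example; the same glob syntax accepted by
 * the set_ftrace_filter file applies.
 *
 *	ftrace_set_filter(&my_ops, "vfs_read", strlen("vfs_read"), 1);
 *	ftrace_set_filter(&my_ops, "vfs_write*", strlen("vfs_write*"), 0);
 *	register_ftrace_function(&my_ops);
 */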

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @ops - the ops to set the notrace filter with
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
4345
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
4346 4347
			int len, int reset)
{
4348
	ftrace_ops_init(ops);
4349
	return ftrace_set_regex(ops, buf, len, reset, 0);
4350 4351 4352
}
EXPORT_SYMBOL_GPL(ftrace_set_notrace);
/**
4353
 * ftrace_set_global_filter - set a function to filter on with global tracers
4354 4355 4356 4357 4358 4359 4360
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
4361
void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
4362
{
4363
	ftrace_set_regex(&global_ops, buf, len, reset, 1);
4364
}
4365
EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
4366

4367
/**
4368
 * ftrace_set_global_notrace - set a function to not trace with global tracers
4369 4370 4371 4372 4373 4374 4375 4376
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
4377
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
4378
{
4379
	ftrace_set_regex(&global_ops, buf, len, reset, 0);
4380
}
4381
EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
4382

/*
 * command line interface to allow users to set filters on boot up.
 */
#define FTRACE_FILTER_SIZE		COMMAND_LINE_SIZE
static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;

/* Used by function selftest to not test if filter is set */
bool ftrace_filter_param __initdata;

static int __init set_ftrace_notrace(char *str)
{
	ftrace_filter_param = true;
	strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_notrace=", set_ftrace_notrace);

static int __init set_ftrace_filter(char *str)
{
	ftrace_filter_param = true;
	strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_filter=", set_ftrace_filter);

4409
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4410
static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
4411
static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
4412
static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer);
4413

4414 4415 4416
static unsigned long save_global_trampoline;
static unsigned long save_global_flags;

4417 4418
static int __init set_graph_function(char *str)
{
4419
	strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
4420 4421 4422 4423
	return 1;
}
__setup("ftrace_graph_filter=", set_graph_function);

4424 4425 4426 4427 4428 4429 4430
static int __init set_graph_notrace_function(char *str)
{
	strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_graph_notrace=", set_graph_notrace_function);

4431 4432 4433 4434 4435 4436 4437 4438
static int __init set_graph_max_depth_function(char *str)
{
	if (!str)
		return 0;
	fgraph_max_depth = simple_strtoul(str, NULL, 0);
	return 1;
}
__setup("ftrace_graph_max_depth=", set_graph_max_depth_function);
4439 4440

static void __init set_ftrace_early_graph(char *buf, int enable)
4441 4442 4443
{
	int ret;
	char *func;
4444
	struct ftrace_hash *hash;
4445

4446 4447 4448
	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
	if (WARN_ON(!hash))
		return;
4449 4450 4451 4452

	while (buf) {
		func = strsep(&buf, ",");
		/* we allow only one expression at a time */
4453
		ret = ftrace_graph_set_hash(hash, func);
4454 4455 4456 4457
		if (ret)
			printk(KERN_DEBUG "ftrace: function %s not "
					  "traceable\n", func);
	}
4458 4459 4460 4461 4462

	if (enable)
		ftrace_graph_hash = hash;
	else
		ftrace_graph_notrace_hash = hash;
4463 4464 4465
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

4466 4467
void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
4468 4469 4470
{
	char *func;

4471 4472
	ftrace_ops_init(ops);

4473 4474
	while (buf) {
		func = strsep(&buf, ",");
4475
		ftrace_set_regex(ops, func, strlen(func), 0, enable);
4476 4477 4478 4479 4480 4481
	}
}

static void __init set_ftrace_early_filters(void)
{
	if (ftrace_filter_buf[0])
4482
		ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
4483
	if (ftrace_notrace_buf[0])
4484
		ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
4485 4486
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (ftrace_graph_buf[0])
4487 4488 4489
		set_ftrace_early_graph(ftrace_graph_buf, 1);
	if (ftrace_graph_notrace_buf[0])
		set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
4490
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4491 4492
}

4493
int ftrace_regex_release(struct inode *inode, struct file *file)
4494 4495
{
	struct seq_file *m = (struct seq_file *)file->private_data;
4496
	struct ftrace_ops_hash old_hash_ops;
4497
	struct ftrace_iterator *iter;
4498
	struct ftrace_hash **orig_hash;
4499
	struct ftrace_hash *old_hash;
4500
	struct trace_parser *parser;
4501
	int filter_hash;
4502
	int ret;
4503 4504 4505 4506 4507 4508 4509

	if (file->f_mode & FMODE_READ) {
		iter = m->private;
		seq_release(inode, file);
	} else
		iter = file->private_data;

4510 4511 4512
	parser = &iter->parser;
	if (trace_parser_loaded(parser)) {
		parser->buffer[parser->idx] = 0;
4513
		ftrace_match_records(iter->hash, parser->buffer, parser->idx);
4514 4515
	}

4516 4517
	trace_parser_put(parser);

4518
	mutex_lock(&iter->ops->func_hash->regex_lock);
4519

4520
	if (file->f_mode & FMODE_WRITE) {
4521 4522 4523
		filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);

		if (filter_hash)
4524
			orig_hash = &iter->ops->func_hash->filter_hash;
4525
		else
4526
			orig_hash = &iter->ops->func_hash->notrace_hash;
4527

4528
		mutex_lock(&ftrace_lock);
4529
		old_hash = *orig_hash;
4530 4531
		old_hash_ops.filter_hash = iter->ops->func_hash->filter_hash;
		old_hash_ops.notrace_hash = iter->ops->func_hash->notrace_hash;
4532 4533
		ret = ftrace_hash_move(iter->ops, filter_hash,
				       orig_hash, iter->hash);
4534
		if (!ret) {
4535
			ftrace_ops_update_code(iter->ops, &old_hash_ops);
4536 4537
			free_ftrace_hash_rcu(old_hash);
		}
4538 4539
		mutex_unlock(&ftrace_lock);
	}
4540

4541
	mutex_unlock(&iter->ops->func_hash->regex_lock);
4542 4543
	free_ftrace_hash(iter->hash);
	kfree(iter);
4544

4545 4546 4547
	return 0;
}

4548
static const struct file_operations ftrace_avail_fops = {
4549 4550 4551
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
4553 4554
};

4555 4556 4557 4558 4559 4560 4561
static const struct file_operations ftrace_enabled_fops = {
	.open = ftrace_enabled_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

4562
static const struct file_operations ftrace_filter_fops = {
4563
	.open = ftrace_filter_open,
	.read = seq_read,
4565
	.write = ftrace_filter_write,
4566
	.llseek = tracing_lseek,
4567
	.release = ftrace_regex_release,
4568 4569
};

4570
static const struct file_operations ftrace_notrace_fops = {
4571
	.open = ftrace_notrace_open,
	.read = seq_read,
4573
	.write = ftrace_notrace_write,
4574
	.llseek = tracing_lseek,
4575
	.release = ftrace_regex_release,
4576 4577
};

4578 4579 4580 4581
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static DEFINE_MUTEX(graph_lock);

4582 4583 4584 4585 4586 4587 4588
struct ftrace_hash *ftrace_graph_hash = EMPTY_HASH;
struct ftrace_hash *ftrace_graph_notrace_hash = EMPTY_HASH;

enum graph_filter_type {
	GRAPH_FILTER_NOTRACE	= 0,
	GRAPH_FILTER_FUNCTION,
};
4589

4590 4591
#define FTRACE_GRAPH_EMPTY	((void *)1)

4592
struct ftrace_graph_data {
4593 4594 4595 4596 4597 4598 4599
	struct ftrace_hash		*hash;
	struct ftrace_func_entry	*entry;
	int				idx;   /* for hash table iteration */
	enum graph_filter_type		type;
	struct ftrace_hash		*new_hash;
	const struct seq_operations	*seq_ops;
	struct trace_parser		parser;
4600 4601
};

4602
static void *
4603
__g_next(struct seq_file *m, loff_t *pos)
4604
{
4605
	struct ftrace_graph_data *fgd = m->private;
4606 4607 4608
	struct ftrace_func_entry *entry = fgd->entry;
	struct hlist_head *head;
	int i, idx = fgd->idx;
4609

4610
	if (*pos >= fgd->hash->count)
4611
		return NULL;
4612 4613 4614 4615 4616 4617 4618 4619 4620 4621 4622 4623 4624 4625 4626 4627 4628 4629 4630

	if (entry) {
		hlist_for_each_entry_continue(entry, hlist) {
			fgd->entry = entry;
			return entry;
		}

		idx++;
	}

	for (i = idx; i < 1 << fgd->hash->size_bits; i++) {
		head = &fgd->hash->buckets[i];
		hlist_for_each_entry(entry, head, hlist) {
			fgd->entry = entry;
			fgd->idx = i;
			return entry;
		}
	}
	return NULL;
4631
}
4632

4633 4634 4635 4636 4637
static void *
g_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __g_next(m, pos);
4638 4639 4640 4641
}

static void *g_start(struct seq_file *m, loff_t *pos)
{
4642 4643
	struct ftrace_graph_data *fgd = m->private;

4644 4645
	mutex_lock(&graph_lock);

4646 4647 4648 4649 4650 4651 4652
	if (fgd->type == GRAPH_FILTER_FUNCTION)
		fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
					lockdep_is_held(&graph_lock));
	else
		fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
					lockdep_is_held(&graph_lock));

4653
	/* Nothing, tell g_show to print all functions are enabled */
4654
	if (ftrace_hash_empty(fgd->hash) && !*pos)
4655
		return FTRACE_GRAPH_EMPTY;
4656

4657 4658
	fgd->idx = 0;
	fgd->entry = NULL;
4659
	return __g_next(m, pos);
4660 4661 4662 4663 4664 4665 4666 4667 4668
}

static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}

static int g_show(struct seq_file *m, void *v)
{
4669
	struct ftrace_func_entry *entry = v;
4670

4671
	if (!entry)
4672 4673
		return 0;

4674
	if (entry == FTRACE_GRAPH_EMPTY) {
4675 4676
		struct ftrace_graph_data *fgd = m->private;

4677
		if (fgd->type == GRAPH_FILTER_FUNCTION)
4678
			seq_puts(m, "#### all functions enabled ####\n");
4679
		else
4680
			seq_puts(m, "#### no functions disabled ####\n");
4681 4682 4683
		return 0;
	}

4684
	seq_printf(m, "%ps\n", (void *)entry->ip);
4685 4686 4687 4688

	return 0;
}

static const struct seq_operations ftrace_graph_seq_ops = {
4690 4691 4692 4693 4694 4695 4696
	.start = g_start,
	.next = g_next,
	.stop = g_stop,
	.show = g_show,
};

static int
4697 4698
__ftrace_graph_open(struct inode *inode, struct file *file,
		    struct ftrace_graph_data *fgd)
4699 4700
{
	int ret = 0;
4701
	struct ftrace_hash *new_hash = NULL;
4702

4703 4704 4705
	if (file->f_mode & FMODE_WRITE) {
		const int size_bits = FTRACE_HASH_DEFAULT_BITS;

4706 4707 4708
		if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX))
			return -ENOMEM;

4709 4710 4711 4712 4713 4714 4715 4716 4717
		if (file->f_flags & O_TRUNC)
			new_hash = alloc_ftrace_hash(size_bits);
		else
			new_hash = alloc_and_copy_ftrace_hash(size_bits,
							      fgd->hash);
		if (!new_hash) {
			ret = -ENOMEM;
			goto out;
		}
4718 4719
	}

4720
	if (file->f_mode & FMODE_READ) {
4721
		ret = seq_open(file, &ftrace_graph_seq_ops);
4722 4723 4724
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = fgd;
4725 4726 4727 4728
		} else {
			/* Failed */
			free_ftrace_hash(new_hash);
			new_hash = NULL;
4729 4730 4731
		}
	} else
		file->private_data = fgd;
4732

4733
out:
4734 4735 4736
	if (ret < 0 && file->f_mode & FMODE_WRITE)
		trace_parser_put(&fgd->parser);

4737
	fgd->new_hash = new_hash;
4738 4739 4740 4741 4742 4743 4744 4745

	/*
	 * All uses of fgd->hash must be taken with the graph_lock
	 * held. The graph_lock is going to be released, so force
	 * fgd->hash to be reinitialized when it is taken again.
	 */
	fgd->hash = NULL;

4746 4747 4748
	return ret;
}

4749 4750 4751 4752
static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
	struct ftrace_graph_data *fgd;
4753
	int ret;
4754 4755 4756 4757 4758 4759 4760 4761

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
	if (fgd == NULL)
		return -ENOMEM;

4762 4763
	mutex_lock(&graph_lock);

4764 4765
	fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
					lockdep_is_held(&graph_lock));
4766
	fgd->type = GRAPH_FILTER_FUNCTION;
4767 4768
	fgd->seq_ops = &ftrace_graph_seq_ops;

4769 4770 4771 4772 4773 4774
	ret = __ftrace_graph_open(inode, file, fgd);
	if (ret < 0)
		kfree(fgd);

	mutex_unlock(&graph_lock);
	return ret;
4775 4776
}

4777 4778 4779 4780
static int
ftrace_graph_notrace_open(struct inode *inode, struct file *file)
{
	struct ftrace_graph_data *fgd;
4781
	int ret;
4782 4783 4784 4785 4786 4787 4788 4789

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
	if (fgd == NULL)
		return -ENOMEM;

4790 4791
	mutex_lock(&graph_lock);

4792 4793
	fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
					lockdep_is_held(&graph_lock));
4794
	fgd->type = GRAPH_FILTER_NOTRACE;
4795 4796
	fgd->seq_ops = &ftrace_graph_seq_ops;

4797 4798 4799 4800 4801 4802
	ret = __ftrace_graph_open(inode, file, fgd);
	if (ret < 0)
		kfree(fgd);

	mutex_unlock(&graph_lock);
	return ret;
4803 4804
}

4805 4806 4807
static int
ftrace_graph_release(struct inode *inode, struct file *file)
{
4808
	struct ftrace_graph_data *fgd;
4809 4810 4811
	struct ftrace_hash *old_hash, *new_hash;
	struct trace_parser *parser;
	int ret = 0;
4812

4813 4814 4815
	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;

4816
		fgd = m->private;
4817
		seq_release(inode, file);
4818
	} else {
4819
		fgd = file->private_data;
4820 4821
	}

4822 4823 4824 4825 4826 4827 4828 4829 4830 4831 4832 4833 4834 4835 4836 4837 4838 4839 4840 4841 4842 4843 4844 4845 4846 4847 4848 4849 4850 4851 4852 4853 4854 4855 4856 4857 4858 4859 4860 4861

	if (file->f_mode & FMODE_WRITE) {

		parser = &fgd->parser;

		if (trace_parser_loaded((parser))) {
			parser->buffer[parser->idx] = 0;
			ret = ftrace_graph_set_hash(fgd->new_hash,
						    parser->buffer);
		}

		trace_parser_put(parser);

		new_hash = __ftrace_hash_move(fgd->new_hash);
		if (!new_hash) {
			ret = -ENOMEM;
			goto out;
		}

		mutex_lock(&graph_lock);

		if (fgd->type == GRAPH_FILTER_FUNCTION) {
			old_hash = rcu_dereference_protected(ftrace_graph_hash,
					lockdep_is_held(&graph_lock));
			rcu_assign_pointer(ftrace_graph_hash, new_hash);
		} else {
			old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
					lockdep_is_held(&graph_lock));
			rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash);
		}

		mutex_unlock(&graph_lock);

		/* Wait till all users are no longer using the old hash */
		synchronize_sched();

		free_ftrace_hash(old_hash);
	}

 out:
4862 4863 4864
	kfree(fgd->new_hash);
	kfree(fgd);

4865
	return ret;
4866 4867
}

4868
static int
4869
ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer)
4870
{
4871
	struct ftrace_glob func_g;
4872 4873
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
4874
	struct ftrace_func_entry *entry;
4875
	int fail = 1;
4876
	int not;
4877

4878
	/* decode regex */
4879 4880
	func_g.type = filter_parse_regex(buffer, strlen(buffer),
					 &func_g.search, &not);
4881

4882
	func_g.len = strlen(func_g.search);
4883

4884
	mutex_lock(&ftrace_lock);
4885 4886 4887 4888 4889 4890

	if (unlikely(ftrace_disabled)) {
		mutex_unlock(&ftrace_lock);
		return -ENODEV;
	}

4891 4892
	do_for_each_ftrace_rec(pg, rec) {

4893 4894 4895
		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		if (ftrace_match_record(rec, &func_g, NULL, 0)) {
4897
			entry = ftrace_lookup_ip(hash, rec->ip);
4898 4899 4900

			if (!not) {
				fail = 0;
4901 4902 4903 4904 4905

				if (entry)
					continue;
				if (add_hash_entry(hash, rec->ip) < 0)
					goto out;
4906
			} else {
4907 4908
				if (entry) {
					free_hash_entry(hash, entry);
4909 4910 4911
					fail = 0;
				}
			}
4912
		}
4913
	} while_for_each_ftrace_rec();
4914
out:
4915
	mutex_unlock(&ftrace_lock);
4916

4917 4918 4919 4920
	if (fail)
		return -EINVAL;

	return 0;
4921 4922 4923 4924 4925 4926
}

static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
4927
	ssize_t read, ret = 0;
4928
	struct ftrace_graph_data *fgd = file->private_data;
4929
	struct trace_parser *parser;
4930

4931
	if (!cnt)
4932 4933
		return 0;

4934 4935 4936 4937 4938 4939
	/* Read mode uses seq functions */
	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		fgd = m->private;
	}

4940
	parser = &fgd->parser;
4941

4942
	read = trace_get_user(parser, ubuf, cnt, ppos);
4943

4944 4945
	if (read >= 0 && trace_parser_loaded(parser) &&
	    !trace_parser_cont(parser)) {
4946

4947
		ret = ftrace_graph_set_hash(fgd->new_hash,
4948 4949
					    parser->buffer);
		trace_parser_clear(parser);
4950 4951
	}

4952 4953
	if (!ret)
		ret = read;
4954

4955 4956 4957 4958
	return ret;
}

static const struct file_operations ftrace_graph_fops = {
4959 4960 4961
	.open		= ftrace_graph_open,
	.read		= seq_read,
	.write		= ftrace_graph_write,
4962
	.llseek		= tracing_lseek,
4963
	.release	= ftrace_graph_release,
4964
};
4965 4966 4967 4968 4969

static const struct file_operations ftrace_graph_notrace_fops = {
	.open		= ftrace_graph_notrace_open,
	.read		= seq_read,
	.write		= ftrace_graph_write,
4970
	.llseek		= tracing_lseek,
4971 4972
	.release	= ftrace_graph_release,
};
4973 4974
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

4975 4976 4977 4978 4979 4980 4981 4982 4983 4984 4985 4986 4987 4988 4989 4990 4991 4992 4993 4994 4995 4996 4997 4998 4999 5000 5001 5002 5003 5004
void ftrace_create_filter_files(struct ftrace_ops *ops,
				struct dentry *parent)
{

	trace_create_file("set_ftrace_filter", 0644, parent,
			  ops, &ftrace_filter_fops);

	trace_create_file("set_ftrace_notrace", 0644, parent,
			  ops, &ftrace_notrace_fops);
}

/*
 * The name "destroy_filter_files" is really a misnomer. Although
 * in the future, it may actually delete the files, but this is
 * really intended to make sure the ops passed in are disabled
 * and that when this function returns, the caller is free to
 * free the ops.
 *
 * The "destroy" name is only to match the "create" name that this
 * should be paired with.
 */
void ftrace_destroy_filter_files(struct ftrace_ops *ops)
{
	mutex_lock(&ftrace_lock);
	if (ops->flags & FTRACE_OPS_FL_ENABLED)
		ftrace_shutdown(ops, 0);
	ops->flags |= FTRACE_OPS_FL_DELETED;
	mutex_unlock(&ftrace_lock);
}

5005
static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
5006 5007
{

5008 5009
	trace_create_file("available_filter_functions", 0444,
			d_tracer, NULL, &ftrace_avail_fops);
5010

5011 5012 5013
	trace_create_file("enabled_functions", 0444,
			d_tracer, NULL, &ftrace_enabled_fops);

5014
	ftrace_create_filter_files(&global_ops, d_tracer);
5015

5016
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5017
	trace_create_file("set_graph_function", 0444, d_tracer,
5018 5019
				    NULL,
				    &ftrace_graph_fops);
5020 5021 5022
	trace_create_file("set_graph_notrace", 0444, d_tracer,
				    NULL,
				    &ftrace_graph_notrace_fops);
5023 5024
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

5025 5026 5027
	return 0;
}

5028
static int ftrace_cmp_ips(const void *a, const void *b)
5029
{
5030 5031
	const unsigned long *ipa = a;
	const unsigned long *ipb = b;
5032

5033 5034 5035 5036 5037 5038 5039
	if (*ipa > *ipb)
		return 1;
	if (*ipa < *ipb)
		return -1;
	return 0;
}

5040
static int ftrace_process_locs(struct module *mod,
5041
			       unsigned long *start,
5042 5043
			       unsigned long *end)
{
5044
	struct ftrace_page *start_pg;
5045
	struct ftrace_page *pg;
5046
	struct dyn_ftrace *rec;
5047
	unsigned long count;
5048 5049
	unsigned long *p;
	unsigned long addr;
5050
	unsigned long flags = 0; /* Shut up gcc */
5051 5052 5053 5054 5055 5056 5057
	int ret = -ENOMEM;

	count = end - start;

	if (!count)
		return 0;

5058
	sort(start, count, sizeof(*start),
5059
	     ftrace_cmp_ips, NULL);
5060

5061 5062
	start_pg = ftrace_allocate_pages(count);
	if (!start_pg)
5063
		return -ENOMEM;
5064

	mutex_lock(&ftrace_lock);
5066

5067 5068 5069 5070 5071
	/*
	 * Core and each module needs their own pages, as
	 * modules will free them when they are removed.
	 * Force a new page to be allocated for modules.
	 */
5072 5073 5074
	if (!mod) {
		WARN_ON(ftrace_pages || ftrace_pages_start);
		/* First initialization */
5075
		ftrace_pages = ftrace_pages_start = start_pg;
5076
	} else {
5077
		if (!ftrace_pages)
5078
			goto out;
5079

5080 5081 5082 5083
		if (WARN_ON(ftrace_pages->next)) {
			/* Hmm, we have free pages? */
			while (ftrace_pages->next)
				ftrace_pages = ftrace_pages->next;
5084
		}
5085

5086
		ftrace_pages->next = start_pg;
5087 5088
	}

5089
	p = start;
5090
	pg = start_pg;
5091 5092
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
5093 5094 5095 5096 5097 5098 5099 5100
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
5101 5102 5103 5104 5105 5106 5107 5108 5109 5110

		if (pg->index == pg->size) {
			/* We should have allocated enough */
			if (WARN_ON(!pg->next))
				break;
			pg = pg->next;
		}

		rec = &pg->records[pg->index++];
		rec->ip = addr;
5111 5112
	}

5113 5114 5115 5116 5117 5118
	/* We should have used all pages */
	WARN_ON(pg->next);

	/* Assign the last page to ftrace_pages */
	ftrace_pages = pg;

5119
	/*
5120 5121 5122 5123 5124 5125
	 * We only need to disable interrupts on start up
	 * because we are modifying code that an interrupt
	 * may execute, and the modification is not atomic.
	 * But for modules, nothing runs the code we modify
	 * until we are finished with it, and there's no
	 * reason to cause large interrupt latencies while we do it.
5126
	 */
5127 5128
	if (!mod)
		local_irq_save(flags);
5129
	ftrace_update_code(mod, start_pg);
5130 5131
	if (!mod)
		local_irq_restore(flags);
5132 5133
	ret = 0;
 out:
	mutex_unlock(&ftrace_lock);
5135

5136
	return ret;
5137 5138
}

5139
#ifdef CONFIG_MODULES
5140 5141 5142

#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)

5143 5144 5145 5146 5147 5148 5149 5150 5151 5152 5153 5154 5155
static int referenced_filters(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	int cnt = 0;

	for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
		if (ops_references_rec(ops, rec))
		    cnt++;
	}

	return cnt;
}

5156
void ftrace_release_mod(struct module *mod)
5157 5158
{
	struct dyn_ftrace *rec;
5159
	struct ftrace_page **last_pg;
5160
	struct ftrace_page *pg;
5161
	int order;
5162

5163 5164
	mutex_lock(&ftrace_lock);

5165
	if (ftrace_disabled)
5166
		goto out_unlock;
5167

5168 5169 5170 5171 5172 5173 5174
	/*
	 * Each module has its own ftrace_pages, remove
	 * them from the list.
	 */
	last_pg = &ftrace_pages_start;
	for (pg = ftrace_pages_start; pg; pg = *last_pg) {
		rec = &pg->records[0];
5175
		if (within_module_core(rec->ip, mod)) {
5176
			/*
5177 5178
			 * As core pages are first, the first
			 * page should never be a module page.
5179
			 */
5180 5181 5182 5183 5184 5185 5186 5187
			if (WARN_ON(pg == ftrace_pages_start))
				goto out_unlock;

			/* Check if we are deleting the last page */
			if (pg == ftrace_pages)
				ftrace_pages = next_to_ftrace_page(last_pg);

			*last_pg = pg->next;
5188 5189 5190
			order = get_count_order(pg->size / ENTRIES_PER_PAGE);
			free_pages((unsigned long)pg->records, order);
			kfree(pg);
5191 5192 5193
		} else
			last_pg = &pg->next;
	}
5194
 out_unlock:
5195 5196 5197
	mutex_unlock(&ftrace_lock);
}

5198
void ftrace_module_enable(struct module *mod)
5199 5200 5201 5202 5203 5204 5205 5206 5207 5208 5209 5210 5211 5212 5213 5214 5215 5216 5217 5218 5219 5220 5221 5222 5223 5224 5225 5226 5227 5228 5229 5230 5231 5232 5233 5234 5235 5236 5237 5238 5239 5240 5241 5242 5243 5244 5245 5246 5247 5248 5249 5250 5251 5252 5253 5254 5255 5256 5257 5258 5259 5260 5261 5262 5263 5264 5265 5266
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	mutex_lock(&ftrace_lock);

	if (ftrace_disabled)
		goto out_unlock;

	/*
	 * If the tracing is enabled, go ahead and enable the record.
	 *
	 * The reason not to enable the record immediately is the
	 * inherent check of ftrace_make_nop/ftrace_make_call for
	 * correct previous instructions.  Making first the NOP
	 * conversion puts the module to the correct state, thus
	 * passing the ftrace_make_call check.
	 *
	 * We also delay this to after the module code already set the
	 * text to read-only, as we now need to set it back to read-write
	 * so that we can modify the text.
	 */
	if (ftrace_start_up)
		ftrace_arch_code_modify_prepare();

	do_for_each_ftrace_rec(pg, rec) {
		int cnt;
		/*
		 * do_for_each_ftrace_rec() is a double loop.
		 * module text shares the pg. If a record is
		 * not part of this module, then skip this pg,
		 * which the "break" will do.
		 */
		if (!within_module_core(rec->ip, mod))
			break;

		cnt = 0;

		/*
		 * When adding a module, we need to check if tracers are
		 * currently enabled and if they are, and can trace this record,
		 * we need to enable the module functions as well as update the
		 * reference counts for those function records.
		 */
		if (ftrace_start_up)
			cnt += referenced_filters(rec);

		/* This clears FTRACE_FL_DISABLED */
		rec->flags = cnt;

		if (ftrace_start_up && cnt) {
			int failed = __ftrace_replace_code(rec, 1);
			if (failed) {
				ftrace_bug(failed, rec);
				goto out_loop;
			}
		}

	} while_for_each_ftrace_rec();

 out_loop:
	if (ftrace_start_up)
		ftrace_arch_code_modify_post_process();

 out_unlock:
	mutex_unlock(&ftrace_lock);
}

5267
void ftrace_module_init(struct module *mod)
5268
{
5269
	if (ftrace_disabled || !mod->num_ftrace_callsites)
5270
		return;
5271

5272 5273
	ftrace_process_locs(mod, mod->ftrace_callsites,
			    mod->ftrace_callsites + mod->num_ftrace_callsites);
5274
}
5275 5276
#endif /* CONFIG_MODULES */

5277 5278
void __init ftrace_init(void)
{
5279 5280
	extern unsigned long __start_mcount_loc[];
	extern unsigned long __stop_mcount_loc[];
5281
	unsigned long count, flags;
5282 5283 5284
	int ret;

	local_irq_save(flags);
5285
	ret = ftrace_dyn_arch_init();
5286
	local_irq_restore(flags);
5287
	if (ret)
5288 5289 5290
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;
5291 5292
	if (!count) {
		pr_info("ftrace: No functions to be traced?\n");
5293
		goto failed;
5294 5295 5296 5297
	}

	pr_info("ftrace: allocating %ld entries in %ld pages\n",
		count, count / ENTRIES_PER_PAGE + 1);
5298 5299 5300

	last_ftrace_enabled = ftrace_enabled = 1;

5301
	ret = ftrace_process_locs(NULL,
5302
				  __start_mcount_loc,
5303 5304
				  __stop_mcount_loc);

5305 5306
	set_ftrace_early_filters();

5307 5308 5309 5310 5311
	return;
 failed:
	ftrace_disabled = 1;
}

5312 5313 5314 5315 5316 5317 5318
/* Do nothing if arch does not support this */
void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
{
}

static void ftrace_update_trampoline(struct ftrace_ops *ops)
{
5319 5320 5321 5322 5323 5324 5325 5326 5327 5328 5329

/*
 * Currently there's no safe way to free a trampoline when the kernel
 * is configured with PREEMPT. That is because a task could be preempted
 * when it jumped to the trampoline, it may be preempted for a long time
 * depending on the system load, and currently there's no way to know
 * when it will be off the trampoline. If the trampoline is freed
 * too early, when the task runs again, it will be executing on freed
 * memory and crash.
 */
#ifdef CONFIG_PREEMPT
5330 5331 5332
	/* Currently, only non dynamic ops can have a trampoline */
	if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
		return;
5333
#endif
5334 5335 5336 5337

	arch_ftrace_update_trampoline(ops);
}

5338
#else
5339

5340
static struct ftrace_ops global_ops = {
5341
	.func			= ftrace_stub,
5342 5343 5344
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
				  FTRACE_OPS_FL_INITIALIZED |
				  FTRACE_OPS_FL_PID,
5345 5346
};

5347 5348 5349 5350 5351
static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
5352
core_initcall(ftrace_nodyn_init);
5353

5354
static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
5355
static inline void ftrace_startup_enable(int command) { }
5356
static inline void ftrace_startup_all(int command) { }
5357
/* Keep as macros so we do not need to define the commands */
5358 5359 5360 5361 5362 5363
# define ftrace_startup(ops, command)					\
	({								\
		int ___ret = __register_ftrace_function(ops);		\
		if (!___ret)						\
			(ops)->flags |= FTRACE_OPS_FL_ENABLED;		\
		___ret;							\
5364
	})
5365 5366 5367 5368 5369 5370 5371
# define ftrace_shutdown(ops, command)					\
	({								\
		int ___ret = __unregister_ftrace_function(ops);		\
		if (!___ret)						\
			(ops)->flags &= ~FTRACE_OPS_FL_ENABLED;		\
		___ret;							\
	})
5372

# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
5375 5376

static inline int
5377
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
5378 5379 5380 5381
{
	return 1;
}

5382 5383 5384 5385
static void ftrace_update_trampoline(struct ftrace_ops *ops)
{
}

5386 5387
#endif /* CONFIG_DYNAMIC_FTRACE */

5388 5389 5390 5391 5392 5393 5394 5395 5396 5397 5398 5399 5400 5401 5402 5403 5404 5405 5406 5407 5408 5409 5410
__init void ftrace_init_global_array_ops(struct trace_array *tr)
{
	tr->ops = &global_ops;
	tr->ops->private = tr;
}

void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
{
	/* If we filter on pids, update to use the pid function */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
		if (WARN_ON(tr->ops->func != ftrace_stub))
			printk("ftrace ops had %pS for function\n",
			       tr->ops->func);
	}
	tr->ops->func = func;
	tr->ops->private = tr;
}

void ftrace_reset_array_ops(struct trace_array *tr)
{
	tr->ops->func = ftrace_stub;
}

5411 5412
static inline void
__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
5413
		       struct ftrace_ops *ignored, struct pt_regs *regs)
5414
{
5415
	struct ftrace_ops *op;
5416
	int bit;
5417

5418 5419 5420
	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
	if (bit < 0)
		return;
5421

5422 5423 5424 5425 5426
	/*
	 * Some of the ops may be dynamically allocated,
	 * they must be freed after a synchronize_sched().
	 */
	preempt_disable_notrace();
5427

5428
	do_for_each_ftrace_op(op, ftrace_ops_list) {
5429 5430 5431 5432 5433 5434 5435 5436 5437 5438 5439 5440 5441 5442
		/*
		 * Check the following for each ops before calling their func:
		 *  if RCU flag is set, then rcu_is_watching() must be true
		 *  if PER_CPU is set, then ftrace_function_local_disable()
		 *                          must be false
		 *  Otherwise test if the ip matches the ops filter
		 *
		 * If any of the above fails then the op->func() is not executed.
		 */
		if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) &&
		    (!(op->flags & FTRACE_OPS_FL_PER_CPU) ||
		     !ftrace_function_local_disabled(op)) &&
		    ftrace_ops_test(op, ip, regs)) {
			if (FTRACE_WARN_ON(!op->func)) {
				pr_warn("op=%p %pS\n", op, op);
5445 5446
				goto out;
			}
5447
			op->func(ip, parent_ip, op, regs);
5448
		}
5449
	} while_for_each_ftrace_op(op);
5450
out:
5451
	preempt_enable_notrace();
5452
	trace_clear_recursion(bit);
5453 5454
}

5455 5456 5457 5458 5459
/*
 * Some archs only support passing ip and parent_ip. Even though
 * the list function ignores the op parameter, we do not want any
 * C side effects, where a function is called without the caller
 * sending a third parameter.
5460 5461 5462
 * Archs are to support both the regs and ftrace_ops at the same time.
 * If they support ftrace_ops, it is assumed they support regs.
 * If call backs want to use regs, they must either check for regs
5463 5464
 * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
5465
 * An architecture can pass partial regs with ftrace_ops and still
 * set the ARCH_SUPPORTS_FTRACE_OPS.
5467 5468 5469
 */
#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
5470
				 struct ftrace_ops *op, struct pt_regs *regs)
5471
{
5472
	__ftrace_ops_list_func(ip, parent_ip, NULL, regs);
5473 5474 5475 5476
}
#else
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
{
5477
	__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
5478 5479 5480
}
#endif

5481 5482
/*
 * If there's only one function registered but it does not support
5483 5484
 * recursion, needs RCU protection and/or requires per cpu handling, then
 * this function will be called by the mcount trampoline.
5485
 */
5486
static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
5487 5488 5489 5490
				   struct ftrace_ops *op, struct pt_regs *regs)
{
	int bit;

5491 5492 5493
	if ((op->flags & FTRACE_OPS_FL_RCU) && !rcu_is_watching())
		return;

5494 5495 5496 5497
	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
	if (bit < 0)
		return;

5498
	preempt_disable_notrace();
5499

5500 5501 5502 5503 5504 5505
	if (!(op->flags & FTRACE_OPS_FL_PER_CPU) ||
	    !ftrace_function_local_disabled(op)) {
		op->func(ip, parent_ip, op, regs);
	}

	preempt_enable_notrace();
5506 5507 5508
	trace_clear_recursion(bit);
}

5509 5510 5511 5512 5513 5514 5515
/**
 * ftrace_ops_get_func - get the function a trampoline should call
 * @ops: the ops to get the function for
 *
 * Normally the mcount trampoline will call the ops->func, but there
 * are times that it should not. For example, if the ops does not
 * have its own recursion protection, then it should call the
5516
 * ftrace_ops_assist_func() instead.
5517 5518 5519 5520 5521 5522
 *
 * Returns the function that the trampoline should call for @ops.
 */
ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
{
	/*
5523 5524
	 * If the function does not handle recursion, needs to be RCU safe,
	 * or does per cpu logic, then we need to call the assist handler.
5525
	 */
5526 5527 5528
	if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE) ||
	    ops->flags & (FTRACE_OPS_FL_RCU | FTRACE_OPS_FL_PER_CPU))
		return ftrace_ops_assist_func;
5529 5530 5531 5532

	return ops->func;
}

5533 5534 5535
static void
ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
		    struct task_struct *prev, struct task_struct *next)
{
5537 5538
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

5540
	pid_list = rcu_dereference_sched(tr->function_pids);
5541

5542 5543
	this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
		       trace_ignore_this_task(pid_list, next));
}

5546
static void clear_ftrace_pids(struct trace_array *tr)
5547
{
5548 5549
	struct trace_pid_list *pid_list;
	int cpu;
5550

5551 5552 5553 5554
	pid_list = rcu_dereference_protected(tr->function_pids,
					     lockdep_is_held(&ftrace_lock));
	if (!pid_list)
		return;
5555

5556
	unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
5557

5558 5559
	for_each_possible_cpu(cpu)
		per_cpu_ptr(tr->trace_buffer.data, cpu)->ftrace_ignore_pid = false;

5561
	rcu_assign_pointer(tr->function_pids, NULL);

5563 5564
	/* Wait till all users are no longer using pid filtering */
	synchronize_sched();
5565

5566
	trace_free_pid_list(pid_list);
5567 5568
}

5569
static void ftrace_pid_reset(struct trace_array *tr)
5570
{
5571
	mutex_lock(&ftrace_lock);
5572
	clear_ftrace_pids(tr);

5574
	ftrace_update_pid_func();
5575
	ftrace_startup_all(0);
5576 5577 5578 5579

	mutex_unlock(&ftrace_lock);
}

5580 5581
/* Greater than any max PID */
#define FTRACE_NO_PIDS		(void *)(PID_MAX_LIMIT + 1)
5582

5583
static void *fpid_start(struct seq_file *m, loff_t *pos)
5584
	__acquires(RCU)
5585
{
5586 5587 5588
	struct trace_pid_list *pid_list;
	struct trace_array *tr = m->private;

5589
	mutex_lock(&ftrace_lock);
5590 5591 5592
	rcu_read_lock_sched();

	pid_list = rcu_dereference_sched(tr->function_pids);
5593

5594 5595
	if (!pid_list)
		return !(*pos) ? FTRACE_NO_PIDS : NULL;
5596

5597
	return trace_pid_start(pid_list, pos);
5598 5599 5600 5601
}

static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
{
5602 5603 5604 5605
	struct trace_array *tr = m->private;
	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);

	if (v == FTRACE_NO_PIDS)
5606 5607
		return NULL;

5608
	return trace_pid_next(pid_list, v, pos);
5609 5610 5611
}

static void fpid_stop(struct seq_file *m, void *p)
5612
	__releases(RCU)
5613
{
5614
	rcu_read_unlock_sched();
5615 5616 5617 5618 5619
	mutex_unlock(&ftrace_lock);
}

static int fpid_show(struct seq_file *m, void *v)
{
5620
	if (v == FTRACE_NO_PIDS) {
5621
		seq_puts(m, "no pid\n");
5622 5623 5624
		return 0;
	}

5625
	return trace_pid_show(m, v);
5626 5627 5628 5629 5630 5631 5632 5633 5634 5635 5636 5637
}

static const struct seq_operations ftrace_pid_sops = {
	.start = fpid_start,
	.next = fpid_next,
	.stop = fpid_stop,
	.show = fpid_show,
};

static int
ftrace_pid_open(struct inode *inode, struct file *file)
{
5638 5639
	struct trace_array *tr = inode->i_private;
	struct seq_file *m;
5640 5641
	int ret = 0;

5642 5643 5644
	if (trace_array_get(tr) < 0)
		return -ENODEV;

5645 5646
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
5647
		ftrace_pid_reset(tr);
5648

5649 5650 5651 5652 5653 5654 5655 5656
	ret = seq_open(file, &ftrace_pid_sops);
	if (ret < 0) {
		trace_array_put(tr);
	} else {
		m = file->private_data;
		/* copy tr over to seq ops */
		m->private = tr;
	}
5657 5658 5659 5660

	return ret;
}

5661 5662 5663 5664 5665 5666 5667 5668 5669 5670 5671 5672 5673 5674 5675 5676
static void ignore_task_cpu(void *data)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	/*
	 * This function is called by on_each_cpu() while the
	 * event_mutex is held.
	 */
	pid_list = rcu_dereference_protected(tr->function_pids,
					     mutex_is_locked(&ftrace_lock));

	this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
		       trace_ignore_this_task(pid_list, current));
}

5677 5678 5679 5680
static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
5681 5682 5683 5684 5685
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	struct trace_pid_list *filtered_pids = NULL;
	struct trace_pid_list *pid_list;
	ssize_t ret;
5686

5687 5688 5689 5690 5691 5692 5693 5694 5695 5696 5697
	if (!cnt)
		return 0;

	mutex_lock(&ftrace_lock);

	filtered_pids = rcu_dereference_protected(tr->function_pids,
					     lockdep_is_held(&ftrace_lock));

	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
	if (ret < 0)
		goto out;
5698

5699
	rcu_assign_pointer(tr->function_pids, pid_list);
5700

5701 5702 5703 5704 5705 5706 5707
	if (filtered_pids) {
		synchronize_sched();
		trace_free_pid_list(filtered_pids);
	} else if (pid_list) {
		/* Register a probe to set whether to ignore the tracing of a task */
		register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
	}
5708

5709
	/*
5710 5711 5712
	 * Ignoring of pids is done at task switch. But we have to
	 * check for those tasks that are currently running.
	 * Always do this in case a pid was appended or removed.
5713
	 */
5714
	on_each_cpu(ignore_task_cpu, tr, 1);
5715

5716 5717 5718 5719
	ftrace_update_pid_func();
	ftrace_startup_all(0);
 out:
	mutex_unlock(&ftrace_lock);
5720

5721 5722
	if (ret > 0)
		*ppos += ret;
5723

5724
	return ret;
5725
}
5726

5727 5728 5729
static int
ftrace_pid_release(struct inode *inode, struct file *file)
{
5730
	struct trace_array *tr = inode->i_private;
5731

5732 5733 5734
	trace_array_put(tr);

	return seq_release(inode, file);
5735 5736
}

5737
static const struct file_operations ftrace_pid_fops = {
5738 5739 5740
	.open		= ftrace_pid_open,
	.write		= ftrace_pid_write,
	.read		= seq_read,
5741
	.llseek		= tracing_lseek,
5742
	.release	= ftrace_pid_release,
5743 5744
};

5745
void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
5746
{
5747
	trace_create_file("set_ftrace_pid", 0644, d_tracer,
5748
			    tr, &ftrace_pid_fops);
5749 5750
}

5751 5752 5753 5754 5755 5756 5757 5758 5759 5760
void __init ftrace_init_tracefs_toplevel(struct trace_array *tr,
					 struct dentry *d_tracer)
{
	/* Only the top level directory has the dyn_tracefs and profile */
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));

	ftrace_init_dyn_tracefs(d_tracer);
	ftrace_profile_tracefs(d_tracer);
}

/**
5762
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
 */
5768
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}

5775 5776 5777 5778 5779 5780 5781 5782
/**
 * Test if ftrace is dead or not.
 */
int ftrace_is_dead(void)
{
	return ftrace_disabled;
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret = -1;

	ftrace_ops_init(ops);

	mutex_lock(&ftrace_lock);

	ret = ftrace_startup(ops, 0);

	mutex_unlock(&ftrace_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_function);
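
/*
 * Illustrative sketch (not part of the original file): a minimal caller of
 * register_ftrace_function(). The callback and ops names are assumptions;
 * note that an ops lacking FTRACE_OPS_FL_RECURSION_SAFE is routed through
 * the recursion-protecting assist handler above.
 *
 *	static void notrace my_callback(unsigned long ip, unsigned long parent_ip,
 *					struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		// keep this cheap and non-recursive
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	ftrace_set_filter(&my_ops, "schedule", strlen("schedule"), 1);
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */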

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_lock);
	ret = ftrace_shutdown(ops, 0);
	mutex_unlock(&ftrace_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_function);
5827

int
5829
ftrace_enable_sysctl(struct ctl_table *table, int write,
5830
		     void __user *buffer, size_t *lenp,
5831 5832
		     loff_t *ppos)
{
5833
	int ret = -ENODEV;
5834

	mutex_lock(&ftrace_lock);
5836

5837 5838 5839 5840
	if (unlikely(ftrace_disabled))
		goto out;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
5841

5842
	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
5843 5844
		goto out;

5845
	last_ftrace_enabled = !!ftrace_enabled;
5846 5847 5848 5849

	if (ftrace_enabled) {

		/* we are starting ftrace again */
5850 5851
		if (ftrace_ops_list != &ftrace_list_end)
			update_ftrace_function();
5852

5853 5854
		ftrace_startup_sysctl();

5855 5856 5857 5858 5859 5860 5861 5862
	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_lock);
5864
	return ret;
5865
}
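
/*
 * Note (illustrative): this handler is wired up from kernel/sysctl.c as
 * the proc handler for the "ftrace_enabled" entry, so the switch above
 * is what runs when user space does e.g.:
 *
 *	echo 0 > /proc/sys/kernel/ftrace_enabled
 *	echo 1 > /proc/sys/kernel/ftrace_enabled
 */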

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static struct ftrace_ops graph_ops = {
	.func			= ftrace_stub,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
				   FTRACE_OPS_FL_INITIALIZED |
				   FTRACE_OPS_FL_PID |
				   FTRACE_OPS_FL_STUB,
#ifdef FTRACE_GRAPH_TRAMP_ADDR
	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
	/* trampoline_size is only needed for dynamically allocated tramps */
#endif
	ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
};

void ftrace_graph_sleep_time_control(bool enable)
{
	fgraph_sleep_time = enable;
}

void ftrace_graph_graph_time_control(bool enable)
{
	fgraph_graph_time = enable;
}
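
/*
 * Note (illustrative, an assumption about the wiring elsewhere in the
 * tracing code): the two helpers above are driven by the "sleep-time"
 * and "graph-time" trace options, e.g.:
 *
 *	echo 0 > /sys/kernel/debug/tracing/options/sleep-time
 *	echo 0 > /sys/kernel/debug/tracing/options/graph-time
 */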

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;

/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock(&tasklist_lock);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
			t->curr_ret_stack = -1;
			/* Make sure the tasks see the -1 first: */
			smp_wmb();
			t->ret_stack = ret_stack_list[start++];
		}
	} while_each_thread(g, t);

unlock:
	read_unlock(&tasklist_lock);
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}

static void
ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
			struct task_struct *prev, struct task_struct *next)
{
	unsigned long long timestamp;
	int index;

	/*
	 * Does the user want to count the time a function was asleep?
	 * If so, do not update the time stamps.
	 */
	if (fgraph_sleep_time)
		return;

	timestamp = trace_clock_local();

	prev->ftrace_timestamp = timestamp;

	/* only process tasks that we timestamped */
	if (!next->ftrace_timestamp)
		return;

	/*
	 * Update all the counters in next to make up for the
	 * time next was sleeping.
	 */
	timestamp -= next->ftrace_timestamp;

	for (index = next->curr_ret_stack; index >= 0; index--)
		next->ret_stack[index].calltime += timestamp;
}

/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret, cpu;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				sizeof(struct ftrace_ret_stack *),
				GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu) {
		if (!idle_task(cpu)->ret_stack)
			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
	}

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	if (!ret) {
		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
		if (ret)
			pr_info("ftrace_graph: Couldn't activate tracepoint"
				" probe to kernel_sched_switch\n");
	}

	kfree(ret_stack_list);
	return ret;
}

/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
							void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}
	return NOTIFY_DONE;
}

static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
{
	if (!ftrace_ops_test(&global_ops, trace->func, NULL))
		return 0;
	return __ftrace_graph_entry(trace);
}

/*
 * The function graph tracer should only trace the functions defined
 * by set_ftrace_filter and set_ftrace_notrace. If another function
 * tracer ops is registered, the graph tracer requires testing the
 * function against the global ops, and not just trace any function
 * that any ftrace_ops has registered.
 */
static void update_function_graph_func(void)
{
	struct ftrace_ops *op;
	bool do_test = false;

	/*
	 * The graph and global ops share the same set of functions
	 * to test. If any other ops is on the list, then
	 * the graph tracing needs to test if it's the function
	 * it should call.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op != &global_ops && op != &graph_ops &&
		    op != &ftrace_list_end) {
			do_test = true;
			/* in double loop, break out with goto */
			goto out;
		}
	} while_for_each_ftrace_op(op);
 out:
	if (do_test)
		ftrace_graph_entry = ftrace_graph_entry_test;
	else
		ftrace_graph_entry = __ftrace_graph_entry;
}

static struct notifier_block ftrace_suspend_notifier = {
	.notifier_call = ftrace_suspend_notifier_call,
};

int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_lock);

	/* we currently allow only one tracer registered at a time */
	if (ftrace_graph_active) {
		ret = -EBUSY;
		goto out;
	}

	register_pm_notifier(&ftrace_suspend_notifier);

	ftrace_graph_active++;
	ret = start_graph_tracing();
	if (ret) {
		ftrace_graph_active--;
		goto out;
	}

	ftrace_graph_return = retfunc;

	/*
	 * Update the indirect function to the entryfunc, and the
	 * function that gets called to the entry_test first. Then
	 * call the update fgraph entry function to determine if
	 * the entryfunc should be called directly or not.
	 */
	__ftrace_graph_entry = entryfunc;
	ftrace_graph_entry = ftrace_graph_entry_test;
	update_function_graph_func();

	ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
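
/*
 * Usage sketch for the call above (illustrative only, not part of the
 * upstream file; the handler names are made up):
 *
 *	static int my_graph_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	// nonzero means: trace this function
 *	}
 *
 *	static void my_graph_return(struct ftrace_graph_ret *trace)
 *	{
 *		// runs when the traced function returns
 *	}
 *
 *	ret = register_ftrace_graph(my_graph_return, my_graph_entry);
 *	...
 *	unregister_ftrace_graph();
 */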

void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_lock);

	if (unlikely(!ftrace_graph_active))
		goto out;

	ftrace_graph_active--;
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	__ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);
	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);

#ifdef CONFIG_DYNAMIC_FTRACE
	/*
	 * Function graph does not allocate the trampoline, but
	 * other global_ops do. We need to reset the ALLOC_TRAMP flag
	 * if one was used.
	 */
	global_ops.trampoline = save_global_trampoline;
	if (save_global_flags & FTRACE_OPS_FL_ALLOC_TRAMP)
		global_ops.flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
#endif

 out:
	mutex_unlock(&ftrace_lock);
}

static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);

static void
graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
{
	atomic_set(&t->tracing_graph_pause, 0);
	atomic_set(&t->trace_overrun, 0);
	t->ftrace_timestamp = 0;
	/* make curr_ret_stack visible before we add the ret_stack */
	smp_wmb();
	t->ret_stack = ret_stack;
}

/*
 * Allocate a return stack for the idle task. May be the first
 * time through, or it may be done by CPU hotplug online.
 */
void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
{
	t->curr_ret_stack = -1;
	/*
	 * The idle task has no parent, it either has its own
	 * stack or no stack at all.
	 */
	if (t->ret_stack)
		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = per_cpu(idle_ret_stack, cpu);
		if (!ret_stack) {
			ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
					    * sizeof(struct ftrace_ret_stack),
					    GFP_KERNEL);
			if (!ret_stack)
				return;
			per_cpu(idle_ret_stack, cpu) = ret_stack;
		}
		graph_init_task(t, ret_stack);
	}
}

/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	/* Make sure we do not use the parent ret_stack */
	t->ret_stack = NULL;
	t->curr_ret_stack = -1;

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		if (!ret_stack)
			return;
		graph_init_task(t, ret_stack);
	}
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack	*ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}
#endif