/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/module.h>
#include <linux/uaccess.h>

#include "trace_probe.h"

#define KPROBE_EVENT_SYSTEM "kprobes"

/**
 * Kprobe event core functions
 */
struct trace_probe {
	struct list_head	list;
	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
	unsigned long		nhit;
	unsigned int		flags;	/* For TP_FLAG_* */
	const char		*symbol;	/* symbol name */
	struct ftrace_event_class	class;
	struct ftrace_event_call	call;
	struct ftrace_event_file	**files;
	ssize_t			size;		/* trace entry size */
	unsigned int		nr_args;
	struct probe_arg	args[];
};

#define SIZEOF_TRACE_PROBE(n)			\
	(offsetof(struct trace_probe, args) +	\
	(sizeof(struct probe_arg) * (n)))
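
/*
 * A trace_probe carries its probe_arg array as a flexible array
 * member, so the allocation size depends on the argument count:
 * SIZEOF_TRACE_PROBE(n) is offsetof(struct trace_probe, args) plus
 * room for n probe_args, which alloc_trace_probe() below hands to
 * kzalloc().
 */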

static __kprobes bool trace_probe_is_return(struct trace_probe *tp)
{
	return tp->rp.handler != NULL;
}

static __kprobes const char *trace_probe_symbol(struct trace_probe *tp)
{
	return tp->symbol ? tp->symbol : "unknown";
}

static __kprobes unsigned long trace_probe_offset(struct trace_probe *tp)
{
	return tp->rp.kp.offset;
}

static __kprobes bool trace_probe_is_enabled(struct trace_probe *tp)
{
	return !!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE));
}

static __kprobes bool trace_probe_is_registered(struct trace_probe *tp)
{
	return !!(tp->flags & TP_FLAG_REGISTERED);
}

static __kprobes bool trace_probe_has_gone(struct trace_probe *tp)
{
	return !!(kprobe_gone(&tp->rp.kp));
}

static __kprobes bool trace_probe_within_module(struct trace_probe *tp,
						struct module *mod)
{
	int len = strlen(mod->name);
	const char *name = trace_probe_symbol(tp);
	return strncmp(mod->name, name, len) == 0 && name[len] == ':';
}
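
/*
 * Probes on module symbols are specified as "MOD:SYM", so the module
 * name is the prefix in front of ':'; trace_probe_module_callback()
 * below relies on this to find probes on a just-loaded module.
 */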

static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp)
{
	return !!strchr(trace_probe_symbol(tp), ':');
}

static int register_probe_event(struct trace_probe *tp);
static void unregister_probe_event(struct trace_probe *tp);

static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
				struct pt_regs *regs);

/*
 * Allocate new trace_probe and initialize it (including kprobes).
 */
static struct trace_probe *alloc_trace_probe(const char *group,
					     const char *event,
					     void *addr,
					     const char *symbol,
					     unsigned long offs,
					     int nargs, bool is_return)
{
	struct trace_probe *tp;
	int ret = -ENOMEM;

	tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(ret);

	if (symbol) {
		tp->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tp->symbol)
			goto error;
		tp->rp.kp.symbol_name = tp->symbol;
		tp->rp.kp.offset = offs;
	} else
		tp->rp.kp.addr = addr;

	if (is_return)
		tp->rp.handler = kretprobe_dispatcher;
	else
		tp->rp.kp.pre_handler = kprobe_dispatcher;

	if (!event || !is_good_name(event)) {
		ret = -EINVAL;
		goto error;
	}

	tp->call.class = &tp->class;
	tp->call.name = kstrdup(event, GFP_KERNEL);
	if (!tp->call.name)
		goto error;

	if (!group || !is_good_name(group)) {
		ret = -EINVAL;
		goto error;
	}

	tp->class.system = kstrdup(group, GFP_KERNEL);
	if (!tp->class.system)
		goto error;

	INIT_LIST_HEAD(&tp->list);
	return tp;
error:
	kfree(tp->call.name);
	kfree(tp->symbol);
	kfree(tp);
	return ERR_PTR(ret);
}

static void free_trace_probe(struct trace_probe *tp)
{
	int i;

	for (i = 0; i < tp->nr_args; i++)
		traceprobe_free_probe_arg(&tp->args[i]);

	kfree(tp->call.class->system);
	kfree(tp->call.name);
	kfree(tp->symbol);
	kfree(tp);
}

static struct trace_probe *find_trace_probe(const char *event,
					    const char *group)
{
	struct trace_probe *tp;

	list_for_each_entry(tp, &probe_list, list)
		if (strcmp(tp->call.name, event) == 0 &&
		    strcmp(tp->call.class->system, group) == 0)
			return tp;
	return NULL;
}

static int trace_probe_nr_files(struct trace_probe *tp)
{
	struct ftrace_event_file **file = tp->files;
	int ret = 0;

	if (file)
		while (*(file++))
			ret++;

	return ret;
}

static DEFINE_MUTEX(probe_enable_lock);

/*
 * Enable trace_probe:
 * if the file is NULL, enable the "perf" handler, otherwise enable
 * the "trace" handler.
 */
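/*
 * tp->files is a NULL-terminated array that is swapped in with
 * rcu_assign_pointer(). The probe handlers walk it with preemption
 * disabled, so waiting with synchronize_sched() before kfree()ing an
 * old array is sufficient to keep the handlers from seeing freed
 * memory.
 */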
static int
enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
{
	int ret = 0;

	mutex_lock(&probe_enable_lock);

	if (file) {
		struct ftrace_event_file **new, **old = tp->files;
		int n = trace_probe_nr_files(tp);

		/* 1 is for new one and 1 is for stopper */
		new = kzalloc((n + 2) * sizeof(struct ftrace_event_file *),
			      GFP_KERNEL);
		if (!new) {
			ret = -ENOMEM;
			goto out_unlock;
		}
		memcpy(new, old, n * sizeof(struct ftrace_event_file *));
		new[n] = file;
		/* The last one keeps a NULL */

		rcu_assign_pointer(tp->files, new);
		tp->flags |= TP_FLAG_TRACE;

		if (old) {
			/* Make sure the probe is done with old files */
			synchronize_sched();
			kfree(old);
		}
	} else
		tp->flags |= TP_FLAG_PROFILE;

	if (trace_probe_is_enabled(tp) && trace_probe_is_registered(tp) &&
	    !trace_probe_has_gone(tp)) {
		if (trace_probe_is_return(tp))
			ret = enable_kretprobe(&tp->rp);
		else
			ret = enable_kprobe(&tp->rp.kp);
	}

 out_unlock:
	mutex_unlock(&probe_enable_lock);

	return ret;
}

static int
trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file)
{
	int i;

	if (tp->files) {
		for (i = 0; tp->files[i]; i++)
			if (tp->files[i] == file)
				return i;
	}

	return -1;
}

/*
 * Disable trace_probe:
 * if the file is NULL, disable the "perf" handler, otherwise disable
 * the "trace" handler.
 */
static int
disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
{
	int ret = 0;

	mutex_lock(&probe_enable_lock);

	if (file) {
		struct ftrace_event_file **new, **old = tp->files;
		int n = trace_probe_nr_files(tp);
		int i, j;

		if (n == 0 || trace_probe_file_index(tp, file) < 0) {
			ret = -EINVAL;
			goto out_unlock;
		}

		if (n == 1) {	/* Remove the last file */
			tp->flags &= ~TP_FLAG_TRACE;
			new = NULL;
		} else {
			new = kzalloc(n * sizeof(struct ftrace_event_file *),
				      GFP_KERNEL);
			if (!new) {
				ret = -ENOMEM;
				goto out_unlock;
			}

			/* This copy & check loop copies the NULL stopper too */
			for (i = 0, j = 0; j < n && i < n + 1; i++)
				if (old[i] != file)
					new[j++] = old[i];
		}

		rcu_assign_pointer(tp->files, new);

		/* Make sure the probe is done with old files */
		synchronize_sched();
		kfree(old);
	} else
		tp->flags &= ~TP_FLAG_PROFILE;

	if (!trace_probe_is_enabled(tp) && trace_probe_is_registered(tp)) {
		if (trace_probe_is_return(tp))
			disable_kretprobe(&tp->rp);
		else
			disable_kprobe(&tp->rp.kp);
	}

 out_unlock:
	mutex_unlock(&probe_enable_lock);

	return ret;
}

/* Internal register function - just handle k*probes and flags */
static int __register_trace_probe(struct trace_probe *tp)
{
	int i, ret;

	if (trace_probe_is_registered(tp))
		return -EINVAL;

	for (i = 0; i < tp->nr_args; i++)
		traceprobe_update_arg(&tp->args[i]);

	/* Set/clear disabled flag according to tp->flags */
	if (trace_probe_is_enabled(tp))
		tp->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
	else
		tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;

	if (trace_probe_is_return(tp))
		ret = register_kretprobe(&tp->rp);
	else
		ret = register_kprobe(&tp->rp.kp);

	if (ret == 0)
		tp->flags |= TP_FLAG_REGISTERED;
	else {
		pr_warning("Could not insert probe at %s+%lu: %d\n",
			   trace_probe_symbol(tp), trace_probe_offset(tp), ret);
		if (ret == -ENOENT && trace_probe_is_on_module(tp)) {
			pr_warning("This probe might be able to register after"
				   "target module is loaded. Continue.\n");
			ret = 0;
		} else if (ret == -EILSEQ) {
			pr_warning("Probing address(0x%p) is not an "
				   "instruction boundary.\n",
				   tp->rp.kp.addr);
			ret = -EINVAL;
		}
	}

	return ret;
}

/* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_probe(struct trace_probe *tp)
{
	if (trace_probe_is_registered(tp)) {
		if (trace_probe_is_return(tp))
			unregister_kretprobe(&tp->rp);
		else
			unregister_kprobe(&tp->rp.kp);
		tp->flags &= ~TP_FLAG_REGISTERED;
		/* Cleanup kprobe for reuse */
		if (tp->rp.kp.symbol_name)
			tp->rp.kp.addr = NULL;
	}
}

/* Unregister a trace_probe and probe_event: call with probe_lock held */
static int unregister_trace_probe(struct trace_probe *tp)
{
	/* Enabled event can not be unregistered */
	if (trace_probe_is_enabled(tp))
		return -EBUSY;

	__unregister_trace_probe(tp);
	list_del(&tp->list);
	unregister_probe_event(tp);

	return 0;
}

/* Register a trace_probe and probe_event */
static int register_trace_probe(struct trace_probe *tp)
{
	struct trace_probe *old_tp;
	int ret;

	mutex_lock(&probe_lock);

	/* Delete old (same name) event if it exists */
	old_tp = find_trace_probe(tp->call.name, tp->call.class->system);
	if (old_tp) {
		ret = unregister_trace_probe(old_tp);
		if (ret < 0)
			goto end;
		free_trace_probe(old_tp);
	}

	/* Register new event */
	ret = register_probe_event(tp);
	if (ret) {
		pr_warning("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	/* Register k*probe */
	ret = __register_trace_probe(tp);
	if (ret < 0)
		unregister_probe_event(tp);
	else
		list_add_tail(&tp->list, &probe_list);

end:
	mutex_unlock(&probe_lock);
	return ret;
}

/* Module notifier callback, checking events on the module */
static int trace_probe_module_callback(struct notifier_block *nb,
				       unsigned long val, void *data)
{
	struct module *mod = data;
	struct trace_probe *tp;
	int ret;

	if (val != MODULE_STATE_COMING)
		return NOTIFY_DONE;

	/* Update probes on coming module */
	mutex_lock(&probe_lock);
	list_for_each_entry(tp, &probe_list, list) {
		if (trace_probe_within_module(tp, mod)) {
			/* Don't need to check busy - this should have gone. */
			__unregister_trace_probe(tp);
			ret = __register_trace_probe(tp);
			if (ret)
				pr_warning("Failed to re-register probe %s on"
					   "%s: %d\n",
					   tp->call.name, mod->name, ret);
		}
	}
	mutex_unlock(&probe_lock);

	return NOTIFY_DONE;
}

static struct notifier_block trace_probe_module_nb = {
	.notifier_call = trace_probe_module_callback,
	.priority = 1	/* Invoked after kprobe module callback */
};

static int create_trace_probe(int argc, char **argv)
{
	/*
	 * Argument syntax:
	 *  - Add kprobe: p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe: r[:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
	 * Fetch args:
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth of stack (N:0-)
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG	: fetch register REG
	 * Dereferencing memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 * Type of args:
	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
	 */
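	/*
	 * For example (register names as in the x86 examples of the
	 * kprobetrace documentation), a command such as
	 *   p:myprobe do_sys_open dfd=%ax filename=%dx flags=%cx mode=+4($stack)
	 * arrives here from traceprobe_probes_write() already split on
	 * whitespace, i.e. argc == 6 and argv[0] == "p:myprobe".
	 */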
	struct trace_probe *tp;
	int i, ret = 0;
	bool is_return = false, is_delete = false;
	char *symbol = NULL, *event = NULL, *group = NULL;
	char *arg;
	unsigned long offset = 0;
	void *addr = NULL;
	char buf[MAX_EVENT_NAME_LEN];

	/* argc must be >= 1 */
	if (argv[0][0] == 'p')
		is_return = false;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] == '-')
		is_delete = true;
	else {
		pr_info("Probe definition must be started with 'p', 'r' or"
			" '-'.\n");
		return -EINVAL;
	}

	if (argv[0][1] == ':') {
		event = &argv[0][2];
		if (strchr(event, '/')) {
			group = event;
			event = strchr(group, '/') + 1;
			event[-1] = '\0';
			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = KPROBE_EVENT_SYSTEM;

	if (is_delete) {
		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&probe_lock);
		tp = find_trace_probe(event, group);
		if (!tp) {
			mutex_unlock(&probe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_probe(tp);
		if (ret == 0)
			free_trace_probe(tp);
		mutex_unlock(&probe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	if (isdigit(argv[1][0])) {
		if (is_return) {
			pr_info("Return probe point must be a symbol.\n");
			return -EINVAL;
		}
		/* an address specified */
		ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
		if (ret) {
			pr_info("Failed to parse address.\n");
			return ret;
		}
	} else {
		/* a symbol specified */
		symbol = argv[1];
		/* TODO: support .init module functions */
		ret = traceprobe_split_symbol_offset(symbol, &offset);
		if (ret) {
			pr_info("Failed to parse symbol.\n");
			return ret;
		}
		if (offset && is_return) {
			pr_info("Return probe must be used without offset.\n");
			return -EINVAL;
		}
	}
	argc -= 2; argv += 2;

	/* setup a probe */
	if (!event) {
		/* Make a new event name */
		if (symbol)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
				 is_return ? 'r' : 'p', symbol, offset);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
				 is_return ? 'r' : 'p', addr);
		event = buf;
	}
	tp = alloc_trace_probe(group, event, addr, symbol, offset, argc,
			       is_return);
	if (IS_ERR(tp)) {
		pr_info("Failed to allocate trace_probe.(%d)\n",
			(int)PTR_ERR(tp));
		return PTR_ERR(tp);
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		/* Increment count for freeing args in error case */
		tp->nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			tp->args[i].name = kstrdup(buf, GFP_KERNEL);
		}

		if (!tp->args[i].name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(tp->args[i].name)) {
			pr_info("Invalid argument[%d] name: %s\n",
				i, tp->args[i].name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(tp->args[i].name,
							tp->args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tp->size, &tp->args[i],
						is_return, true);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_probe(tp);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_probe(tp);
	return ret;
}

static int release_all_trace_probes(void)
{
	struct trace_probe *tp;
	int ret = 0;

	mutex_lock(&probe_lock);
	/* Ensure no probe is in use. */
	list_for_each_entry(tp, &probe_list, list)
		if (trace_probe_is_enabled(tp)) {
			ret = -EBUSY;
			goto end;
		}
	/* TODO: Use batch unregistration */
	while (!list_empty(&probe_list)) {
		tp = list_entry(probe_list.next, struct trace_probe, list);
		unregister_trace_probe(tp);
		free_trace_probe(tp);
	}

end:
	mutex_unlock(&probe_lock);

	return ret;
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&probe_lock);
	return seq_list_start(&probe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &probe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&probe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_probe *tp = v;
	int i;

	seq_printf(m, "%c", trace_probe_is_return(tp) ? 'r' : 'p');
	seq_printf(m, ":%s/%s", tp->call.class->system, tp->call.name);

	if (!tp->symbol)
		seq_printf(m, " 0x%p", tp->rp.kp.addr);
	else if (tp->rp.kp.offset)
		seq_printf(m, " %s+%u", trace_probe_symbol(tp),
			   tp->rp.kp.offset);
	else
		seq_printf(m, " %s", trace_probe_symbol(tp));

	for (i = 0; i < tp->nr_args; i++)
		seq_printf(m, " %s=%s", tp->args[i].name, tp->args[i].comm);
	seq_printf(m, "\n");

	return 0;
}

static const struct seq_operations probes_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = release_all_trace_probes();
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return traceprobe_probes_write(file, buffer, count, ppos,
			create_trace_probe);
}

static const struct file_operations kprobe_events_ops = {
	.owner          = THIS_MODULE,
	.open           = probes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
	.write		= probes_write,
};
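
/*
 * User space drives everything through this file: appending "p..." or
 * "r..." commands adds probes, "-:EVENT" removes one, and opening the
 * file for writing with O_TRUNC (e.g. a plain shell redirection onto
 * kprobe_events) drops all probes via release_all_trace_probes() in
 * probes_open() above.
 */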

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_probe *tp = v;

	seq_printf(m, "  %-44s %15lu %15lu\n", tp->call.name, tp->nhit,
		   tp->rp.kp.nmissed);

	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations kprobe_profile_ops = {
	.owner          = THIS_MODULE,
	.open           = profile_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

/* Sum up total data length for dynamic arrays (strings) */
static __kprobes int __get_data_size(struct trace_probe *tp,
				     struct pt_regs *regs)
{
	int i, ret = 0;
	u32 len;

	for (i = 0; i < tp->nr_args; i++)
		if (unlikely(tp->args[i].fetch_size.fn)) {
			call_fetch(&tp->args[i].fetch_size, regs, &len);
			ret += len;
		}

	return ret;
}

/* Store the value of each argument */
static __kprobes void store_trace_args(int ent_size, struct trace_probe *tp,
				       struct pt_regs *regs,
				       u8 *data, int maxlen)
{
	int i;
	u32 end = tp->size;
	u32 *dl;	/* Data (relative) location */

	for (i = 0; i < tp->nr_args; i++) {
		if (unlikely(tp->args[i].fetch_size.fn)) {
			/*
			 * First, we set the relative location and
			 * maximum data length to *dl
			 */
			dl = (u32 *)(data + tp->args[i].offset);
			*dl = make_data_rloc(maxlen, end - tp->args[i].offset);
			/* Then try to fetch string or dynamic array data */
			call_fetch(&tp->args[i].fetch, regs, dl);
			/* Reduce maximum length */
			end += get_rloc_len(*dl);
			maxlen -= get_rloc_len(*dl);
			/* Trick here, convert data_rloc to data_loc */
			*dl = convert_rloc_to_loc(*dl,
				 ent_size + tp->args[i].offset);
		} else
			/* Just fetching data normally */
			call_fetch(&tp->args[i].fetch, regs,
				   data + tp->args[i].offset);
	}
}
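
/*
 * The rloc/loc helpers used above come from trace_probe.h: a data
 * location is a u32 packing the length in the upper 16 bits and the
 * buffer offset in the lower 16 bits. A string argument therefore
 * stores only this u32 at its field offset, with the actual bytes
 * appended after the fixed-size part of the entry.
 */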

/* Kprobe handler */
static __kprobes void
__kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs,
		    struct ftrace_event_file *ftrace_file)
{
	struct kprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, dsize, pc;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	WARN_ON(call != ftrace_file->event_call);

	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(tp, regs);
	size = sizeof(*entry) + tp->size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->ip = (unsigned long)tp->rp.kp.addr;
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_buffer_unlock_commit_regs(buffer, event,
						irq_flags, pc, regs);
}

static __kprobes void
kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs)
{
	struct ftrace_event_file **file = tp->files;

	/* Note: preempt is already disabled around the kprobe handler */
	while (*file) {
		__kprobe_trace_func(tp, regs, *file);
		file++;
	}
}

/* Kretprobe handler */
static __kprobes void
__kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
		       struct pt_regs *regs,
		       struct ftrace_event_file *ftrace_file)
{
	struct kretprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, pc, dsize;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	WARN_ON(call != ftrace_file->event_call);

	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(tp, regs);
	size = sizeof(*entry) + tp->size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_buffer_unlock_commit_regs(buffer, event,
						irq_flags, pc, regs);
}

static __kprobes void
kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
		     struct pt_regs *regs)
{
	struct ftrace_event_file **file = tp->files;

	/* Note: preempt is already disabled around the kprobe handler */
	while (*file) {
		__kretprobe_trace_func(tp, ri, regs, *file);
		file++;
	}
}

/* Event entry printers */
enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct kprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags,
		      struct trace_event *event)
{
	struct kretprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kretprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, " <- "))
		goto partial;

	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}


static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kprobe_trace_entry_head field;
	struct trace_probe *tp = (struct trace_probe *)event_call->data;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tp->nr_args; i++) {
		ret = trace_define_field(event_call, tp->args[i].type->fmttype,
					 tp->args[i].name,
					 sizeof(field) + tp->args[i].offset,
					 tp->args[i].type->size,
					 tp->args[i].type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kretprobe_trace_entry_head field;
	struct trace_probe *tp = (struct trace_probe *)event_call->data;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tp->nr_args; i++) {
		ret = trace_define_field(event_call, tp->args[i].type->fmttype,
					 tp->args[i].name,
					 sizeof(field) + tp->args[i].offset,
					 tp->args[i].type->size,
					 tp->args[i].type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

static int __set_print_fmt(struct trace_probe *tp, char *buf, int len)
{
	int i;
	int pos = 0;

	const char *fmt, *arg;

	if (!trace_probe_is_return(tp)) {
		fmt = "(%lx)";
		arg = "REC->" FIELD_STRING_IP;
	} else {
		fmt = "(%lx <- %lx)";
		arg = "REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP;
	}

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt);

	for (i = 0; i < tp->nr_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%s",
				tp->args[i].name, tp->args[i].type->fmt);
	}

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg);

	for (i = 0; i < tp->nr_args; i++) {
		if (strcmp(tp->args[i].type->name, "string") == 0)
			pos += snprintf(buf + pos, LEN_OR_ZERO,
					", __get_str(%s)",
					tp->args[i].name);
		else
			pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s",
					tp->args[i].name);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}

static int set_print_fmt(struct trace_probe *tp)
{
	int len;
	char *print_fmt;

	/* First: called with 0 length to calculate the needed length */
	len = __set_print_fmt(tp, NULL, 0);
	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_print_fmt(tp, print_fmt, len + 1);
	tp->call.print_fmt = print_fmt;

	return 0;
}
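
/*
 * For illustration, a kprobe with a single argument ends up with a
 * format of the shape "(%lx) arg1=..." printed with REC->ip (and
 * REC->arg1), while a kretprobe gets the "(%lx <- %lx)" prefix with
 * REC->func and REC->ret_ip; each argument's conversion comes from
 * tp->args[i].type->fmt.
 */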

#ifdef CONFIG_PERF_EVENTS

/* Kprobe profile handler */
static __kprobes void
kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs)
{
	struct ftrace_event_call *call = &tp->call;
	struct kprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	dsize = __get_data_size(tp, regs);
	__size = sizeof(*entry) + tp->size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		     "profile buffer not large enough"))
		return;

	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
	if (!entry)
		return;

	entry->ip = (unsigned long)tp->rp.kp.addr;
	memset(&entry[1], 0, dsize);
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

	head = this_cpu_ptr(call->perf_events);
	perf_trace_buf_submit(entry, size, rctx,
					entry->ip, 1, regs, head, NULL);
}

/* Kretprobe profile handler */
static __kprobes void
kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri,
		    struct pt_regs *regs)
{
	struct ftrace_event_call *call = &tp->call;
	struct kretprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	dsize = __get_data_size(tp, regs);
	__size = sizeof(*entry) + tp->size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		     "profile buffer not large enough"))
		return;

	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
	if (!entry)
		return;

	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

	head = this_cpu_ptr(call->perf_events);
	perf_trace_buf_submit(entry, size, rctx,
					entry->ret_ip, 1, regs, head, NULL);
}
#endif	/* CONFIG_PERF_EVENTS */

static __kprobes
int kprobe_register(struct ftrace_event_call *event,
		    enum trace_reg type, void *data)
{
	struct trace_probe *tp = (struct trace_probe *)event->data;
	struct ftrace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return enable_trace_probe(tp, file);
	case TRACE_REG_UNREGISTER:
		return disable_trace_probe(tp, file);

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return enable_trace_probe(tp, NULL);
	case TRACE_REG_PERF_UNREGISTER:
		return disable_trace_probe(tp, NULL);
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

static __kprobes
int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);

	tp->nhit++;

	if (tp->flags & TP_FLAG_TRACE)
		kprobe_trace_func(tp, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tp->flags & TP_FLAG_PROFILE)
		kprobe_perf_func(tp, regs);
#endif
	return 0;	/* We don't tweak the kernel, so just return 0 */
}

static __kprobes
int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);

	tp->nhit++;

	if (tp->flags & TP_FLAG_TRACE)
		kretprobe_trace_func(tp, ri, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tp->flags & TP_FLAG_PROFILE)
		kretprobe_perf_func(tp, ri, regs);
#endif
	return 0;	/* We don't tweak the kernel, so just return 0 */
}

static struct trace_event_functions kretprobe_funcs = {
	.trace		= print_kretprobe_event
};

static struct trace_event_functions kprobe_funcs = {
	.trace		= print_kprobe_event
};

static int register_probe_event(struct trace_probe *tp)
{
	struct ftrace_event_call *call = &tp->call;
	int ret;

	/* Initialize ftrace_event_call */
	INIT_LIST_HEAD(&call->class->fields);
	if (trace_probe_is_return(tp)) {
		call->event.funcs = &kretprobe_funcs;
		call->class->define_fields = kretprobe_event_define_fields;
	} else {
		call->event.funcs = &kprobe_funcs;
		call->class->define_fields = kprobe_event_define_fields;
	}
	if (set_print_fmt(tp) < 0)
		return -ENOMEM;
	ret = register_ftrace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}
	call->flags = 0;
	call->class->reg = kprobe_register;
	call->data = tp;
	ret = trace_add_event_call(call);
	if (ret) {
		pr_info("Failed to register kprobe event: %s\n", call->name);
		kfree(call->print_fmt);
		unregister_ftrace_event(&call->event);
	}
	return ret;
}

static void unregister_probe_event(struct trace_probe *tp)
{
	/* tp->event is unregistered in trace_remove_event_call() */
	trace_remove_event_call(&tp->call);
	kfree(tp->call.print_fmt);
}

/* Make a debugfs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	if (register_module_notifier(&trace_probe_module_nb))
		return -EINVAL;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
				    NULL, &kprobe_events_ops);

	/* Event list interface */
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_events' entry\n");

	/* Profile interface */
	entry = debugfs_create_file("kprobe_profile", 0444, d_tracer,
				    NULL, &kprobe_profile_ops);

	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_profile' entry\n");
	return 0;
}
fs_initcall(init_kprobe_trace);


#ifdef CONFIG_FTRACE_STARTUP_TEST

/*
 * The "__used" keeps gcc from removing the function symbol
 * from the kallsyms table.
 */
static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
					       int a4, int a5, int a6)
{
	return a1 + a2 + a3 + a4 + a5 + a6;
}

static struct ftrace_event_file *
find_trace_probe_file(struct trace_probe *tp, struct trace_array *tr)
{
	struct ftrace_event_file *file;

	list_for_each_entry(file, &tr->events, list)
		if (file->event_call == &tp->call)
			return file;

	return NULL;
}

static __init int kprobe_trace_self_tests_init(void)
{
	int ret, warn = 0;
	int (*target)(int, int, int, int, int, int);
	struct trace_probe *tp;
	struct ftrace_event_file *file;

	target = kprobe_trace_selftest_target;

	pr_info("Testing kprobe tracing: ");

	ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target "
				  "$stack $stack0 +0($stack)",
				  create_trace_probe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function entry.\n");
		warn++;
	} else {
		/* Enable trace point */
		tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tp == NULL)) {
			pr_warn("error on getting new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tp, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_probe(tp, file);
		}
	}

	ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
				  "$retval", create_trace_probe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function return.\n");
		warn++;
	} else {
		/* Enable trace point */
		tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tp == NULL)) {
			pr_warn("error on getting 2nd new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tp, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_probe(tp, file);
		}
	}

	if (warn)
		goto end;

	ret = target(1, 2, 3, 4, 5, 6);

	/* Disable trace points before removing them */
	tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tp == NULL)) {
		pr_warn("error on getting test probe.\n");
		warn++;
	} else {
		file = find_trace_probe_file(tp, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_probe(tp, file);
	}

	tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tp == NULL)) {
		pr_warn("error on getting 2nd test probe.\n");
		warn++;
	} else {
		file = find_trace_probe_file(tp, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_probe(tp, file);
	}

	ret = traceprobe_command("-:testprobe", create_trace_probe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

	ret = traceprobe_command("-:testprobe2", create_trace_probe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

end:
	release_all_trace_probes();
	if (warn)
		pr_cont("NG: Some tests failed. Please check them.\n");
	else
		pr_cont("OK\n");
	return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif