/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/module.h>
#include <linux/uaccess.h>

#include "trace_probe.h"

/* Default event group when the user does not specify GRP in "GRP/EVENT". */
#define KPROBE_EVENT_SYSTEM "kprobes"

/**
 * Kprobe event core functions
 */
/*
 * Runtime state for one kprobe-based trace event.  The embedded kretprobe
 * serves both probe flavours: plain kprobes attach through rp.kp, return
 * probes use the whole kretprobe (rp.handler is then non-NULL).
 */
struct trace_probe {
	struct list_head	list;		/* link in probe_list */
	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
	unsigned long 		nhit;		/* number of hits so far */
	unsigned int		flags;	/* For TP_FLAG_* */
	const char		*symbol;	/* symbol name */
	struct ftrace_event_class	class;
	struct ftrace_event_call	call;
	struct list_head	files;		/* event_file_link list (RCU) */
	ssize_t			size;		/* trace entry size */
	unsigned int		nr_args;	/* entries used in args[] */
	struct probe_arg	args[];		/* flexible array of fetch args */
};

/* One ftrace instance ("file") in which this probe event is enabled. */
struct event_file_link {
	struct ftrace_event_file	*file;
	struct list_head		list;
};

/* Allocation size of a trace_probe that carries @n fetch arguments. */
#define SIZEOF_TRACE_PROBE(n)			\
	(offsetof(struct trace_probe, args) +	\
	(sizeof(struct probe_arg) * (n)))
52

53

54
/* A probe is a kretprobe iff a return handler has been installed. */
static __kprobes bool trace_probe_is_return(struct trace_probe *tp)
{
	return tp->rp.handler ? true : false;
}

59
static __kprobes const char *trace_probe_symbol(struct trace_probe *tp)
60 61 62 63
{
	return tp->symbol ? tp->symbol : "unknown";
}

64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96
/* Offset of the probed address from the probed symbol. */
static __kprobes unsigned long trace_probe_offset(struct trace_probe *tp)
{
	return tp->rp.kp.offset;
}

/* True if the event is enabled for either ftrace or perf. */
static __kprobes bool trace_probe_is_enabled(struct trace_probe *tp)
{
	return !!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE));
}

/* True if the underlying k*probe has been registered with kprobes. */
static __kprobes bool trace_probe_is_registered(struct trace_probe *tp)
{
	return !!(tp->flags & TP_FLAG_REGISTERED);
}

/* True if the probed code has gone away (e.g. its module was unloaded). */
static __kprobes bool trace_probe_has_gone(struct trace_probe *tp)
{
	return !!(kprobe_gone(&tp->rp.kp));
}

/* True if this probe's "MOD:SYM" spec names a symbol inside @mod. */
static __kprobes bool trace_probe_within_module(struct trace_probe *tp,
						struct module *mod)
{
	int len = strlen(mod->name);
	const char *name = trace_probe_symbol(tp);
	/* Match the "MOD" prefix up to the ':' separator. */
	return strncmp(mod->name, name, len) == 0 && name[len] == ':';
}

/* True if the probe targets a module symbol ("MOD:SYM" form). */
static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp)
{
	return !!strchr(trace_probe_symbol(tp), ':');
}

97
/* Probe event (un)registration, defined later in this file. */
static int register_probe_event(struct trace_probe *tp);
static int unregister_probe_event(struct trace_probe *tp);

/* probe_lock protects probe_list and each probe's flags/files list. */
static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);

/* Dispatchers installed as the k*probe handlers, defined later. */
static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
				struct pt_regs *regs);

107 108 109
/*
 * Allocate new trace_probe and initialize it (including kprobes).
 *
 * @group/@event name the event (both validated with is_good_name());
 * exactly one of @addr or @symbol(+@offs) locates the probe point;
 * @nargs sizes the trailing args[] array; @is_return selects kretprobe.
 * Returns the new probe or an ERR_PTR (-EINVAL on bad names, -ENOMEM
 * on allocation failure).  The error path frees partial allocations;
 * kfree(NULL) is a safe no-op for fields not yet set.
 */
static struct trace_probe *alloc_trace_probe(const char *group,
					     const char *event,
					     void *addr,
					     const char *symbol,
					     unsigned long offs,
					     int nargs, bool is_return)
{
	struct trace_probe *tp;
	int ret = -ENOMEM;

	tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(ret);

	if (symbol) {
		tp->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tp->symbol)
			goto error;
		tp->rp.kp.symbol_name = tp->symbol;
		tp->rp.kp.offset = offs;
	} else
		tp->rp.kp.addr = addr;

	/* rp.handler being set is what marks this probe as a kretprobe. */
	if (is_return)
		tp->rp.handler = kretprobe_dispatcher;
	else
		tp->rp.kp.pre_handler = kprobe_dispatcher;

	if (!event || !is_good_name(event)) {
		ret = -EINVAL;
		goto error;
	}

	tp->call.class = &tp->class;
	tp->call.name = kstrdup(event, GFP_KERNEL);
	if (!tp->call.name)
		goto error;

	if (!group || !is_good_name(group)) {
		ret = -EINVAL;
		goto error;
	}

	tp->class.system = kstrdup(group, GFP_KERNEL);
	if (!tp->class.system)
		goto error;

	INIT_LIST_HEAD(&tp->list);
	INIT_LIST_HEAD(&tp->files);
	return tp;
error:
	kfree(tp->call.name);
	kfree(tp->symbol);
	kfree(tp);
	return ERR_PTR(ret);
}

/* Release everything owned by @tp: fetch args, names, and tp itself. */
static void free_trace_probe(struct trace_probe *tp)
{
	int i;

	for (i = 0; i < tp->nr_args; i++)
		traceprobe_free_probe_arg(&tp->args[i]);

	/* class->system and call.name were kstrdup'd in alloc_trace_probe(). */
	kfree(tp->call.class->system);
	kfree(tp->call.name);
	kfree(tp->symbol);
	kfree(tp);
}

180
/*
 * Look up a probe by event and group name.
 * Caller must hold probe_lock (walks probe_list unlocked otherwise).
 */
static struct trace_probe *find_trace_probe(const char *event,
					    const char *group)
{
	struct trace_probe *tp;

	list_for_each_entry(tp, &probe_list, list)
		if (strcmp(tp->call.name, event) == 0 &&
		    strcmp(tp->call.class->system, group) == 0)
			return tp;
	return NULL;
}

192 193 194 195 196 197
/*
 * Enable trace_probe
 * if the file is NULL, enable "perf" handler, or enable "trace" handler.
 */
static int
enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
{
	int ret = 0;

	if (file) {
		struct event_file_link *link;

		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			ret = -ENOMEM;
			goto out;
		}

		/* RCU add: handlers walk tp->files without probe_lock. */
		link->file = file;
		list_add_tail_rcu(&link->list, &tp->files);

		tp->flags |= TP_FLAG_TRACE;
	} else
		tp->flags |= TP_FLAG_PROFILE;

	/* Arm the k*probe only if registered and the target still exists. */
	if (trace_probe_is_registered(tp) && !trace_probe_has_gone(tp)) {
		if (trace_probe_is_return(tp))
			ret = enable_kretprobe(&tp->rp);
		else
			ret = enable_kprobe(&tp->rp.kp);
	}
	/*
	 * NOTE(review): if enable_k*probe() fails, the flag/link set above
	 * is not rolled back here -- confirm callers tolerate that.
	 */
 out:
	return ret;
}

227 228
static struct event_file_link *
find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file)
229
{
230
	struct event_file_link *link;
231

232 233 234
	list_for_each_entry(link, &tp->files, list)
		if (link->file == file)
			return link;
235

236
	return NULL;
237 238 239 240 241 242 243 244
}

/*
 * Disable trace_probe
 * if the file is NULL, disable "perf" handler, or disable "trace" handler.
 */
static int
disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
{
	struct event_file_link *link = NULL;
	int wait = 0;		/* set when RCU readers may still run */
	int ret = 0;

	if (file) {
		link = find_event_file_link(tp, file);
		if (!link) {
			ret = -EINVAL;
			goto out;
		}

		/* Removed under probe_lock; readers may still see it. */
		list_del_rcu(&link->list);
		wait = 1;
		if (!list_empty(&tp->files))
			goto out;

		tp->flags &= ~TP_FLAG_TRACE;
	} else
		tp->flags &= ~TP_FLAG_PROFILE;

	/* Disarm only when neither trace nor perf still needs the probe. */
	if (!trace_probe_is_enabled(tp) && trace_probe_is_registered(tp)) {
		if (trace_probe_is_return(tp))
			disable_kretprobe(&tp->rp);
		else
			disable_kprobe(&tp->rp.kp);
		wait = 1;
	}
 out:
	if (wait) {
		/*
		 * Synchronize with kprobe_trace_func/kretprobe_trace_func
		 * to ensure disabled (all running handlers are finished).
		 * This is not only for kfree(), but also the caller,
		 * trace_remove_event_call() supposes it for releasing
		 * event_call related objects, which will be accessed in
		 * the kprobe_trace_func/kretprobe_trace_func.
		 */
		synchronize_sched();
		kfree(link);	/* Ignored if link == NULL */
	}

	return ret;
}

290 291
/* Internal register function - just handle k*probes and flags */
static int __register_trace_probe(struct trace_probe *tp)
292
{
293
	int i, ret;
294 295 296 297

	if (trace_probe_is_registered(tp))
		return -EINVAL;

298
	for (i = 0; i < tp->nr_args; i++)
299
		traceprobe_update_arg(&tp->args[i]);
300

301 302 303 304 305 306
	/* Set/clear disabled flag according to tp->flag */
	if (trace_probe_is_enabled(tp))
		tp->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
	else
		tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;

307
	if (trace_probe_is_return(tp))
308
		ret = register_kretprobe(&tp->rp);
309
	else
310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347
		ret = register_kprobe(&tp->rp.kp);

	if (ret == 0)
		tp->flags |= TP_FLAG_REGISTERED;
	else {
		pr_warning("Could not insert probe at %s+%lu: %d\n",
			   trace_probe_symbol(tp), trace_probe_offset(tp), ret);
		if (ret == -ENOENT && trace_probe_is_on_module(tp)) {
			pr_warning("This probe might be able to register after"
				   "target module is loaded. Continue.\n");
			ret = 0;
		} else if (ret == -EILSEQ) {
			pr_warning("Probing address(0x%p) is not an "
				   "instruction boundary.\n",
				   tp->rp.kp.addr);
			ret = -EINVAL;
		}
	}

	return ret;
}

/* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_probe(struct trace_probe *tp)
{
	if (trace_probe_is_registered(tp)) {
		if (trace_probe_is_return(tp))
			unregister_kretprobe(&tp->rp);
		else
			unregister_kprobe(&tp->rp.kp);
		tp->flags &= ~TP_FLAG_REGISTERED;
		/* Cleanup kprobe for reuse */
		/* Resetting addr lets a re-register resolve the symbol again. */
		if (tp->rp.kp.symbol_name)
			tp->rp.kp.addr = NULL;
	}
}

/* Unregister a trace_probe and probe_event: call with locking probe_lock */
static int unregister_trace_probe(struct trace_probe *tp)
{
	/* Enabled event can not be unregistered */
	if (trace_probe_is_enabled(tp))
		return -EBUSY;

	/* Will fail if probe is being used by ftrace or perf */
	if (unregister_probe_event(tp))
		return -EBUSY;

	__unregister_trace_probe(tp);
	list_del(&tp->list);

	return 0;
}

/*
 * Register a trace_probe and probe_event.
 * An existing probe with the same group/event name is replaced
 * (unregistered and freed) unless it is busy, in which case this fails.
 * On k*probe registration failure the probe event is rolled back.
 */
static int register_trace_probe(struct trace_probe *tp)
{
	struct trace_probe *old_tp;
	int ret;

	mutex_lock(&probe_lock);

	/* Delete old (same name) event if exist */
	old_tp = find_trace_probe(tp->call.name, tp->call.class->system);
	if (old_tp) {
		ret = unregister_trace_probe(old_tp);
		if (ret < 0)
			goto end;
		free_trace_probe(old_tp);
	}

	/* Register new event */
	ret = register_probe_event(tp);
	if (ret) {
		pr_warning("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	/* Register k*probe */
	ret = __register_trace_probe(tp);
	if (ret < 0)
		unregister_probe_event(tp);
	else
		list_add_tail(&tp->list, &probe_list);

end:
	mutex_unlock(&probe_lock);
	return ret;
}

400 401 402 403 404 405 406 407 408 409 410 411 412 413 414
/*
 * Module notifier call back, checking event on the module.
 * When a module comes up, re-register every probe whose "MOD:SYM"
 * spec targets that module, so probes created before the module was
 * loaded (registered with -ENOENT tolerated) finally take effect.
 */
static int trace_probe_module_callback(struct notifier_block *nb,
				       unsigned long val, void *data)
{
	struct module *mod = data;
	struct trace_probe *tp;
	int ret;

	if (val != MODULE_STATE_COMING)
		return NOTIFY_DONE;

	/* Update probes on coming module */
	mutex_lock(&probe_lock);
	list_for_each_entry(tp, &probe_list, list) {
		if (trace_probe_within_module(tp, mod)) {
			/* Don't need to check busy - this should have gone. */
			__unregister_trace_probe(tp);
			ret = __register_trace_probe(tp);
			if (ret)
				/* Fixed missing space: message read "on%s". */
				pr_warning("Failed to re-register probe %s on "
					   "%s: %d\n",
					   tp->call.name, mod->name, ret);
		}
	}
	mutex_unlock(&probe_lock);

	return NOTIFY_DONE;
}

/* Re-registers module probes on MODULE_STATE_COMING; see callback above. */
static struct notifier_block trace_probe_module_nb = {
	.notifier_call = trace_probe_module_callback,
	.priority = 1	/* Invoked after kprobe module callback */
};

434 435 436 437
/*
 * Parse one kprobe_events command line (already split into argv) and
 * create, or with "-" delete, the corresponding probe event.
 * Returns 0 on success or a negative errno with a pr_info() diagnostic.
 */
static int create_trace_probe(int argc, char **argv)
{
	/*
	 * Argument syntax:
	 *  - Add kprobe: p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe: r[:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
	 * Fetch args:
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth of stack (N:0-)
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG	: fetch register REG
	 * Dereferencing memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 * Type of args:
	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
	 */
	struct trace_probe *tp;
	int i, ret = 0;
	bool is_return = false, is_delete = false;
	char *symbol = NULL, *event = NULL, *group = NULL;
	char *arg;
	unsigned long offset = 0;
	void *addr = NULL;
	char buf[MAX_EVENT_NAME_LEN];

	/* argc must be >= 1 */
	if (argv[0][0] == 'p')
		is_return = false;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] == '-')
		is_delete = true;
	else {
		pr_info("Probe definition must be started with 'p', 'r' or"
			" '-'.\n");
		return -EINVAL;
	}

	/* Optional ":GRP/EVENT" suffix on the command letter. */
	if (argv[0][1] == ':') {
		event = &argv[0][2];
		if (strchr(event, '/')) {
			group = event;
			event = strchr(group, '/') + 1;
			event[-1] = '\0';	/* split GRP from EVENT */
			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = KPROBE_EVENT_SYSTEM;

	if (is_delete) {
		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&probe_lock);
		tp = find_trace_probe(event, group);
		if (!tp) {
			mutex_unlock(&probe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_probe(tp);
		if (ret == 0)
			free_trace_probe(tp);
		mutex_unlock(&probe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	if (isdigit(argv[1][0])) {
		if (is_return) {
			pr_info("Return probe point must be a symbol.\n");
			return -EINVAL;
		}
		/* an address specified */
		ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
		if (ret) {
			pr_info("Failed to parse address.\n");
			return ret;
		}
	} else {
		/* a symbol specified */
		symbol = argv[1];
		/* TODO: support .init module functions */
		ret = traceprobe_split_symbol_offset(symbol, &offset);
		if (ret) {
			pr_info("Failed to parse symbol.\n");
			return ret;
		}
		if (offset && is_return) {
			pr_info("Return probe must be used without offset.\n");
			return -EINVAL;
		}
	}
	argc -= 2; argv += 2;	/* remaining argv[] are fetch args */

	/* setup a probe */
	if (!event) {
		/* Make a new event name */
		if (symbol)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
				 is_return ? 'r' : 'p', symbol, offset);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
				 is_return ? 'r' : 'p', addr);
		event = buf;
	}
	tp = alloc_trace_probe(group, event, addr, symbol, offset, argc,
			       is_return);
	if (IS_ERR(tp)) {
		pr_info("Failed to allocate trace_probe.(%d)\n",
			(int)PTR_ERR(tp));
		return PTR_ERR(tp);
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		/* Increment count for freeing args in error case */
		tp->nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			tp->args[i].name = kstrdup(buf, GFP_KERNEL);
		}

		if (!tp->args[i].name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(tp->args[i].name)) {
			pr_info("Invalid argument[%d] name: %s\n",
				i, tp->args[i].name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(tp->args[i].name,
							tp->args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tp->size, &tp->args[i],
						is_return, true);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_probe(tp);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_probe(tp);
	return ret;
}

623
/*
 * Unregister and free every probe (used when the events file is opened
 * with O_TRUNC).  Fails with -EBUSY if any probe is currently enabled.
 */
static int release_all_trace_probes(void)
{
	struct trace_probe *tp;
	int ret = 0;

	mutex_lock(&probe_lock);
	/* Ensure no probe is in use. */
	list_for_each_entry(tp, &probe_list, list)
		if (trace_probe_is_enabled(tp)) {
			ret = -EBUSY;
			goto end;
		}
	/* TODO: Use batch unregistration */
	while (!list_empty(&probe_list)) {
		tp = list_entry(probe_list.next, struct trace_probe, list);
		ret = unregister_trace_probe(tp);
		if (ret)
			goto end;
		free_trace_probe(tp);
	}

end:
	mutex_unlock(&probe_lock);

	return ret;
}

/* Probes listing interfaces */
/* seq_file iteration holds probe_lock across the whole traversal. */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&probe_lock);
	return seq_list_start(&probe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &probe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&probe_lock);
}

/* Print one probe definition in the same syntax create_trace_probe parses. */
static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_probe *tp = v;
	int i;

	seq_printf(m, "%c", trace_probe_is_return(tp) ? 'r' : 'p');
	seq_printf(m, ":%s/%s", tp->call.class->system, tp->call.name);

	if (!tp->symbol)
		seq_printf(m, " 0x%p", tp->rp.kp.addr);
	else if (tp->rp.kp.offset)
		seq_printf(m, " %s+%u", trace_probe_symbol(tp),
			   tp->rp.kp.offset);
	else
		seq_printf(m, " %s", trace_probe_symbol(tp));

	for (i = 0; i < tp->nr_args; i++)
		seq_printf(m, " %s=%s", tp->args[i].name, tp->args[i].comm);
	seq_printf(m, "\n");

	return 0;
}

static const struct seq_operations probes_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_seq_show
};

/* Opening for write with O_TRUNC clears all probes first. */
static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = release_all_trace_probes();
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return traceprobe_probes_write(file, buffer, count, ppos,
			create_trace_probe);
}

static const struct file_operations kprobe_events_ops = {
	.owner          = THIS_MODULE,
	.open           = probes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
	.write		= probes_write,
};

726 727 728 729 730 731
/* Probes profiling interfaces */
/* One line per probe: event name, hit count, kprobe missed count. */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_probe *tp = v;

	seq_printf(m, "  %-44s %15lu %15lu\n", tp->call.name, tp->nhit,
		   tp->rp.kp.nmissed);

	return 0;
}

/* Reuses the listing iterators; only the show callback differs. */
static const struct seq_operations profile_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations kprobe_profile_ops = {
	.owner          = THIS_MODULE,
	.open           = profile_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804
/* Sum up total data length for dynamic arrays (strings) */
static __kprobes int __get_data_size(struct trace_probe *tp,
				     struct pt_regs *regs)
{
	int total = 0;
	int i;
	u32 len;

	for (i = 0; i < tp->nr_args; i++) {
		/* Only args with a size-fetch callback carry dynamic data. */
		if (unlikely(tp->args[i].fetch_size.fn)) {
			call_fetch(&tp->args[i].fetch_size, regs, &len);
			total += len;
		}
	}

	return total;
}

/*
 * Store the value of each argument into @data (the trace entry payload).
 * Dynamic (string) args are stored as a data-location word at the arg's
 * offset, with the actual bytes appended after the fixed-size region.
 */
static __kprobes void store_trace_args(int ent_size, struct trace_probe *tp,
				       struct pt_regs *regs,
				       u8 *data, int maxlen)
{
	int i;
	u32 end = tp->size;
	u32 *dl;	/* Data (relative) location */

	for (i = 0; i < tp->nr_args; i++) {
		if (unlikely(tp->args[i].fetch_size.fn)) {
			/*
			 * First, we set the relative location and
			 * maximum data length to *dl
			 */
			dl = (u32 *)(data + tp->args[i].offset);
			*dl = make_data_rloc(maxlen, end - tp->args[i].offset);
			/* Then try to fetch string or dynamic array data */
			call_fetch(&tp->args[i].fetch, regs, dl);
			/* Reduce maximum length */
			end += get_rloc_len(*dl);
			maxlen -= get_rloc_len(*dl);
			/* Trick here, convert data_rloc to data_loc */
			*dl = convert_rloc_to_loc(*dl,
				 ent_size + tp->args[i].offset);
		} else
			/* Just fetching data normally */
			call_fetch(&tp->args[i].fetch, regs,
				   data + tp->args[i].offset);
	}
}

805
/* Kprobe handler */
/* Record one kprobe hit into @ftrace_file's ring buffer. */
static __kprobes void
__kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs,
		    struct ftrace_event_file *ftrace_file)
{
	struct kprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, dsize, pc;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	WARN_ON(call != ftrace_file->event_call);

	/* Soft-disabled: event enabled but recording suppressed. */
	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	/* Fixed-size args plus any dynamic (string) data. */
	dsize = __get_data_size(tp, regs);
	size = sizeof(*entry) + tp->size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->ip = (unsigned long)tp->rp.kp.addr;
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

	if (!filter_check_discard(ftrace_file, entry, buffer, event))
		trace_buffer_unlock_commit_regs(buffer, event,
						irq_flags, pc, regs);
}

843 844 845
/* Record the hit in every ftrace instance this probe is enabled in. */
static __kprobes void
kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs)
{
	struct event_file_link *link;

	/* RCU walk; disable_trace_probe() synchronizes before freeing links. */
	list_for_each_entry_rcu(link, &tp->files, list)
		__kprobe_trace_func(tp, regs, link->file);
}

852
/* Kretprobe handler */
/* Record one function-return hit into @ftrace_file's ring buffer. */
static __kprobes void
__kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
		       struct pt_regs *regs,
		       struct ftrace_event_file *ftrace_file)
{
	struct kretprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, pc, dsize;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	WARN_ON(call != ftrace_file->event_call);

	/* Soft-disabled: event enabled but recording suppressed. */
	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(tp, regs);
	size = sizeof(*entry) + tp->size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

	if (!filter_check_discard(ftrace_file, entry, buffer, event))
		trace_buffer_unlock_commit_regs(buffer, event,
						irq_flags, pc, regs);
}

892 893 894 895
/* Record the return hit in every ftrace instance this probe is enabled in. */
static __kprobes void
kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
		     struct pt_regs *regs)
{
	struct event_file_link *link;

	/* RCU walk; disable_trace_probe() synchronizes before freeing links. */
	list_for_each_entry_rcu(link, &tp->files, list)
		__kretprobe_trace_func(tp, ri, regs, link->file);
}

902
/* Event entry printers */
/* Format: "EVENT: (symbol+off)" followed by " name=value" per argument. */
static enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct kprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	/* Argument payload follows the fixed-size header. */
	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

939
/* Format: "EVENT: (ret_ip <- func)" followed by " name=value" per argument. */
static enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags,
		      struct trace_event *event)
{
	struct kretprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kretprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, " <- "))
		goto partial;

	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	/* Argument payload follows the fixed-size header. */
	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}


/* Define ftrace fields (ip + each fetch arg) for a kprobe event. */
static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;	/* NOTE(review): ret appears used by DEFINE_FIELD -- confirm */
	struct kprobe_trace_entry_head field;
	struct trace_probe *tp = (struct trace_probe *)event_call->data;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tp->nr_args; i++) {
		ret = trace_define_field(event_call, tp->args[i].type->fmttype,
					 tp->args[i].name,
					 sizeof(field) + tp->args[i].offset,
					 tp->args[i].type->size,
					 tp->args[i].type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

/* Define ftrace fields (func, ret_ip + each fetch arg) for a kretprobe event. */
static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;	/* NOTE(review): ret appears used by DEFINE_FIELD -- confirm */
	struct kretprobe_trace_entry_head field;
	struct trace_probe *tp = (struct trace_probe *)event_call->data;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tp->nr_args; i++) {
		ret = trace_define_field(event_call, tp->args[i].type->fmttype,
					 tp->args[i].name,
					 sizeof(field) + tp->args[i].offset,
					 tp->args[i].type->size,
					 tp->args[i].type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

1025 1026 1027 1028 1029 1030 1031
/*
 * Build the event's print_fmt string into @buf (at most @len bytes).
 * With len == 0 nothing is written and the needed length is returned,
 * so callers measure first and then format (see set_print_fmt()).
 */
static int __set_print_fmt(struct trace_probe *tp, char *buf, int len)
{
	int i;
	int pos = 0;

	const char *fmt, *arg;

	if (!trace_probe_is_return(tp)) {
		fmt = "(%lx)";
		arg = "REC->" FIELD_STRING_IP;
	} else {
		fmt = "(%lx <- %lx)";
		arg = "REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP;
	}

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt);

	for (i = 0; i < tp->nr_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%s",
				tp->args[i].name, tp->args[i].type->fmt);
	}

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg);

	for (i = 0; i < tp->nr_args; i++) {
		/* String args are referenced via __get_str(), not directly. */
		if (strcmp(tp->args[i].type->name, "string") == 0)
			pos += snprintf(buf + pos, LEN_OR_ZERO,
					", __get_str(%s)",
					tp->args[i].name);
		else
			pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s",
					tp->args[i].name);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}

/* Allocate and install tp->call.print_fmt; returns 0 or -ENOMEM. */
static int set_print_fmt(struct trace_probe *tp)
{
	char *buf;
	int needed;

	/* First pass with length 0 just measures the required size. */
	needed = __set_print_fmt(tp, NULL, 0);
	buf = kmalloc(needed + 1, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Second pass actually formats into the buffer. */
	__set_print_fmt(tp, buf, needed + 1);
	tp->call.print_fmt = buf;

	return 0;
}

1086
#ifdef CONFIG_PERF_EVENTS
1087 1088

/* Kprobe profile handler */
/* Record one kprobe hit into the perf buffer of this CPU. */
static __kprobes void
kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs)
{
	struct ftrace_event_call *call = &tp->call;
	struct kprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	/* No perf consumer on this CPU: nothing to record. */
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(tp, regs);
	__size = sizeof(*entry) + tp->size + dsize;
	/* perf buffer entries are u64-aligned with a u32 size prefix. */
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
	if (!entry)
		return;

	entry->ip = (unsigned long)tp->rp.kp.addr;
	/* Zero the payload so alignment padding never leaks stack data. */
	memset(&entry[1], 0, dsize);
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
	perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
}

/* Kretprobe profile handler */
/* Record one function-return hit into the perf buffer of this CPU. */
static __kprobes void
kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri,
		    struct pt_regs *regs)
{
	struct ftrace_event_call *call = &tp->call;
	struct kretprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	/* No perf consumer on this CPU: nothing to record. */
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(tp, regs);
	__size = sizeof(*entry) + tp->size + dsize;
	/* perf buffer entries are u64-aligned with a u32 size prefix. */
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
	if (!entry)
		return;

	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
	perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
}
1146
#endif	/* CONFIG_PERF_EVENTS */
1147

1148 1149 1150 1151 1152 1153
/*
 * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
 *
 * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
 * lockless, but we can't race with this __init function.
 */
1154
static __kprobes
1155 1156
int kprobe_register(struct ftrace_event_call *event,
		    enum trace_reg type, void *data)
1157
{
1158
	struct trace_probe *tp = (struct trace_probe *)event->data;
1159
	struct ftrace_event_file *file = data;
1160

1161 1162
	switch (type) {
	case TRACE_REG_REGISTER:
1163
		return enable_trace_probe(tp, file);
1164
	case TRACE_REG_UNREGISTER:
1165
		return disable_trace_probe(tp, file);
1166 1167 1168

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
1169
		return enable_trace_probe(tp, NULL);
1170
	case TRACE_REG_PERF_UNREGISTER:
1171
		return disable_trace_probe(tp, NULL);
1172 1173
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
1174 1175
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
1176
		return 0;
1177 1178 1179 1180
#endif
	}
	return 0;
}
1181 1182 1183 1184 1185

static __kprobes
int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
1186

1187 1188
	tp->nhit++;

1189
	if (tp->flags & TP_FLAG_TRACE)
1190
		kprobe_trace_func(tp, regs);
1191
#ifdef CONFIG_PERF_EVENTS
1192
	if (tp->flags & TP_FLAG_PROFILE)
1193
		kprobe_perf_func(tp, regs);
1194
#endif
1195 1196 1197 1198 1199 1200 1201 1202
	return 0;	/* We don't tweek kernel, so just return 0 */
}

static __kprobes
int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);

1203 1204
	tp->nhit++;

1205
	if (tp->flags & TP_FLAG_TRACE)
1206
		kretprobe_trace_func(tp, ri, regs);
1207
#ifdef CONFIG_PERF_EVENTS
1208
	if (tp->flags & TP_FLAG_PROFILE)
1209
		kretprobe_perf_func(tp, ri, regs);
1210
#endif
1211 1212
	return 0;	/* We don't tweek kernel, so just return 0 */
}
1213

1214 1215 1216 1217 1218 1219 1220 1221
static struct trace_event_functions kretprobe_funcs = {
	.trace		= print_kretprobe_event
};

static struct trace_event_functions kprobe_funcs = {
	.trace		= print_kprobe_event
};

1222 1223 1224 1225 1226 1227
static int register_probe_event(struct trace_probe *tp)
{
	struct ftrace_event_call *call = &tp->call;
	int ret;

	/* Initialize ftrace_event_call */
1228
	INIT_LIST_HEAD(&call->class->fields);
1229
	if (trace_probe_is_return(tp)) {
1230
		call->event.funcs = &kretprobe_funcs;
1231
		call->class->define_fields = kretprobe_event_define_fields;
1232
	} else {
1233
		call->event.funcs = &kprobe_funcs;
1234
		call->class->define_fields = kprobe_event_define_fields;
1235
	}
1236 1237
	if (set_print_fmt(tp) < 0)
		return -ENOMEM;
1238 1239
	ret = register_ftrace_event(&call->event);
	if (!ret) {
1240
		kfree(call->print_fmt);
1241
		return -ENODEV;
1242
	}
1243
	call->flags = 0;
1244
	call->class->reg = kprobe_register;
1245 1246
	call->data = tp;
	ret = trace_add_event_call(call);
1247
	if (ret) {
1248
		pr_info("Failed to register kprobe event: %s\n", call->name);
1249
		kfree(call->print_fmt);
1250
		unregister_ftrace_event(&call->event);
1251
	}
1252 1253 1254
	return ret;
}

1255
static int unregister_probe_event(struct trace_probe *tp)
1256
{
1257 1258
	int ret;

1259
	/* tp->event is unregistered in trace_remove_event_call() */
1260 1261 1262 1263
	ret = trace_remove_event_call(&tp->call);
	if (!ret)
		kfree(tp->call.print_fmt);
	return ret;
1264 1265
}

L
Lucas De Marchi 已提交
1266
/* Make a debugfs interface for controlling probe points */
1267 1268 1269 1270 1271
static __init int init_kprobe_trace(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

1272 1273 1274
	if (register_module_notifier(&trace_probe_module_nb))
		return -EINVAL;

1275 1276 1277 1278 1279 1280 1281
	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
				    NULL, &kprobe_events_ops);

1282
	/* Event list interface */
1283 1284 1285
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_events' entry\n");
1286 1287 1288 1289 1290 1291 1292 1293

	/* Profile interface */
	entry = debugfs_create_file("kprobe_profile", 0444, d_tracer,
				    NULL, &kprobe_profile_ops);

	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_profile' entry\n");
1294 1295 1296 1297 1298 1299 1300
	return 0;
}
fs_initcall(init_kprobe_trace);


#ifdef CONFIG_FTRACE_STARTUP_TEST

1301 1302 1303 1304 1305 1306
/*
 * The "__used" keeps gcc from removing the function symbol
 * from the kallsyms table.
 */
static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
					       int a4, int a5, int a6)
1307 1308 1309 1310
{
	return a1 + a2 + a3 + a4 + a5 + a6;
}

1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322
static struct ftrace_event_file *
find_trace_probe_file(struct trace_probe *tp, struct trace_array *tr)
{
	struct ftrace_event_file *file;

	list_for_each_entry(file, &tr->events, list)
		if (file->event_call == &tp->call)
			return file;

	return NULL;
}

1323 1324 1325 1326
/*
 * Nobody but us can call enable_trace_probe/disable_trace_probe at this
 * stage, we can do this lockless.
 */
1327 1328
static __init int kprobe_trace_self_tests_init(void)
{
1329
	int ret, warn = 0;
1330
	int (*target)(int, int, int, int, int, int);
1331
	struct trace_probe *tp;
1332
	struct ftrace_event_file *file;
1333 1334 1335 1336 1337

	target = kprobe_trace_selftest_target;

	pr_info("Testing kprobe tracing: ");

1338 1339 1340
	ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target "
				  "$stack $stack0 +0($stack)",
				  create_trace_probe);
1341
	if (WARN_ON_ONCE(ret)) {
1342
		pr_warn("error on probing function entry.\n");
1343 1344 1345
		warn++;
	} else {
		/* Enable trace point */
1346
		tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
1347
		if (WARN_ON_ONCE(tp == NULL)) {
1348
			pr_warn("error on getting new probe.\n");
1349
			warn++;
1350 1351 1352 1353 1354 1355 1356 1357
		} else {
			file = find_trace_probe_file(tp, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_probe(tp, file);
		}
1358
	}
1359

1360 1361
	ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
				  "$retval", create_trace_probe);
1362
	if (WARN_ON_ONCE(ret)) {
1363
		pr_warn("error on probing function return.\n");
1364 1365 1366
		warn++;
	} else {
		/* Enable trace point */
1367
		tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
1368
		if (WARN_ON_ONCE(tp == NULL)) {
1369
			pr_warn("error on getting 2nd new probe.\n");
1370
			warn++;
1371 1372 1373 1374 1375 1376 1377 1378
		} else {
			file = find_trace_probe_file(tp, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_probe(tp, file);
		}
1379 1380 1381 1382
	}

	if (warn)
		goto end;
1383 1384 1385

	ret = target(1, 2, 3, 4, 5, 6);

1386 1387 1388
	/* Disable trace points before removing it */
	tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tp == NULL)) {
1389
		pr_warn("error on getting test probe.\n");
1390
		warn++;
1391 1392 1393 1394 1395 1396 1397 1398
	} else {
		file = find_trace_probe_file(tp, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_probe(tp, file);
	}
1399 1400 1401

	tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tp == NULL)) {
1402
		pr_warn("error on getting 2nd test probe.\n");
1403
		warn++;
1404 1405 1406 1407 1408 1409 1410 1411
	} else {
		file = find_trace_probe_file(tp, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_probe(tp, file);
	}
1412

1413
	ret = traceprobe_command("-:testprobe", create_trace_probe);
1414
	if (WARN_ON_ONCE(ret)) {
1415
		pr_warn("error on deleting a probe.\n");
1416 1417 1418
		warn++;
	}

1419
	ret = traceprobe_command("-:testprobe2", create_trace_probe);
1420
	if (WARN_ON_ONCE(ret)) {
1421
		pr_warn("error on deleting a probe.\n");
1422 1423
		warn++;
	}
1424

1425
end:
1426
	release_all_trace_probes();
1427 1428 1429 1430
	if (warn)
		pr_cont("NG: Some tests are failed. Please check them.\n");
	else
		pr_cont("OK\n");
1431 1432 1433 1434 1435 1436
	return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif