trace_kprobe.c 34.5 KB
Newer Older
1
/*
2
 * Kprobes-based tracing events
3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/module.h>
#include <linux/uaccess.h>

23
#include "trace_probe.h"
24

25
#define KPROBE_EVENT_SYSTEM "kprobes"
26

27
/**
28
 * Kprobe event core functions
29 30 31
 */
/*
 * Runtime state for one kprobe-based trace event.  A kretprobe is embedded
 * so the same object can back either a kprobe (via rp.kp) or a kretprobe
 * (via rp); trace_probe_is_return() distinguishes the two.
 */
struct trace_probe {
	struct list_head	list;		/* link in probe_list */
	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
	unsigned long 		nhit;		/* hit counter for profile seq file */
	unsigned int		flags;	/* For TP_FLAG_* */
	const char		*symbol;	/* symbol name */
	struct ftrace_event_class	class;
	struct ftrace_event_call	call;
	struct list_head	files;		/* event_file_link list (RCU) */
	ssize_t			size;		/* trace entry size */
	unsigned int		nr_args;	/* number of entries in args[] */
	struct probe_arg	args[];		/* flexible array of fetch args */
};

/*
 * Links one ftrace_event_file (a consumer of this event) to a trace_probe.
 * Entries live on trace_probe->files and are traversed under RCU.
 */
struct event_file_link {
	struct ftrace_event_file	*file;	/* the consuming event file */
	struct list_head		list;	/* link in trace_probe->files */
};

/* Allocation size for a trace_probe carrying n fetch arguments. */
#define SIZEOF_TRACE_PROBE(n)			\
	(offsetof(struct trace_probe, args) +	\
	(sizeof(struct probe_arg) * (n)))
52

53

54
/* A probe is a kretprobe iff a return handler was installed at alloc time. */
static __kprobes bool trace_probe_is_return(struct trace_probe *tp)
{
	return tp->rp.handler ? true : false;
}

59
static __kprobes const char *trace_probe_symbol(struct trace_probe *tp)
60 61 62 63
{
	return tp->symbol ? tp->symbol : "unknown";
}

64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96
static __kprobes unsigned long trace_probe_offset(struct trace_probe *tp)
{
	return tp->rp.kp.offset;
}

/* Enabled means at least one consumer (ftrace or perf) is attached. */
static __kprobes bool trace_probe_is_enabled(struct trace_probe *tp)
{
	unsigned int mask = TP_FLAG_TRACE | TP_FLAG_PROFILE;

	return (tp->flags & mask) != 0;
}

/* True once the underlying k*probe has been registered with the kernel. */
static __kprobes bool trace_probe_is_registered(struct trace_probe *tp)
{
	return (tp->flags & TP_FLAG_REGISTERED) != 0;
}

/* True when the probed code has vanished (e.g. its module was unloaded). */
static __kprobes bool trace_probe_has_gone(struct trace_probe *tp)
{
	return kprobe_gone(&tp->rp.kp) ? true : false;
}

/*
 * True when the probe's symbol is of the form "MOD:sym" with MOD equal to
 * the given module's name.
 */
static __kprobes bool trace_probe_within_module(struct trace_probe *tp,
						struct module *mod)
{
	const char *sym = trace_probe_symbol(tp);
	int n = strlen(mod->name);

	if (strncmp(mod->name, sym, n) != 0)
		return false;

	return sym[n] == ':';
}

/* Module-scoped probes use the "MOD:symbol" syntax, i.e. contain a ':'. */
static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp)
{
	return strchr(trace_probe_symbol(tp), ':') != NULL;
}

97 98 99 100 101 102
static int register_probe_event(struct trace_probe *tp);
static void unregister_probe_event(struct trace_probe *tp);

static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);

103 104 105 106
static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
				struct pt_regs *regs);

107 108 109
/*
 * Allocate new trace_probe and initialize it (including kprobes).
 *
 * Exactly one of @symbol (+@offs) or @addr identifies the probe point;
 * @symbol takes precedence when both are given.  @is_return selects a
 * kretprobe (return handler) over a plain kprobe (pre handler).
 *
 * Returns the new probe or an ERR_PTR (-ENOMEM on allocation failure,
 * -EINVAL on a bad event/group name).  All partially-built state is freed
 * on the error path.
 */
static struct trace_probe *alloc_trace_probe(const char *group,
					     const char *event,
					     void *addr,
					     const char *symbol,
					     unsigned long offs,
					     int nargs, bool is_return)
{
	struct trace_probe *tp;
	int ret = -ENOMEM;

	tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(ret);

	if (symbol) {
		tp->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tp->symbol)
			goto error;
		tp->rp.kp.symbol_name = tp->symbol;
		tp->rp.kp.offset = offs;
	} else
		tp->rp.kp.addr = addr;

	/* The installed handler decides kprobe vs kretprobe from here on. */
	if (is_return)
		tp->rp.handler = kretprobe_dispatcher;
	else
		tp->rp.kp.pre_handler = kprobe_dispatcher;

	if (!event || !is_good_name(event)) {
		ret = -EINVAL;
		goto error;
	}

	tp->call.class = &tp->class;
	tp->call.name = kstrdup(event, GFP_KERNEL);
	if (!tp->call.name)
		goto error;

	if (!group || !is_good_name(group)) {
		ret = -EINVAL;
		goto error;
	}

	/* free_trace_probe() releases this via tp->call.class->system. */
	tp->class.system = kstrdup(group, GFP_KERNEL);
	if (!tp->class.system)
		goto error;

	INIT_LIST_HEAD(&tp->list);
	INIT_LIST_HEAD(&tp->files);
	return tp;
error:
	/* kfree(NULL) is a no-op, so unset members are safe to pass. */
	kfree(tp->call.name);
	kfree(tp->symbol);
	kfree(tp);
	return ERR_PTR(ret);
}

/* Release a trace_probe and everything it owns (args, names, symbol). */
static void free_trace_probe(struct trace_probe *tp)
{
	int i;

	for (i = 0; i < tp->nr_args; i++)
		traceprobe_free_probe_arg(&tp->args[i]);

	/* call.class->system aliases tp->class.system set in alloc. */
	kfree(tp->call.class->system);
	kfree(tp->call.name);
	kfree(tp->symbol);
	kfree(tp);
}

180
/*
 * Look up a probe by event and group name; returns NULL if not found.
 * Caller must hold probe_lock.
 */
static struct trace_probe *find_trace_probe(const char *event,
					    const char *group)
{
	struct trace_probe *tp;

	list_for_each_entry(tp, &probe_list, list)
		if (strcmp(tp->call.name, event) == 0 &&
		    strcmp(tp->call.class->system, group) == 0)
			return tp;
	return NULL;
}

192 193 194 195 196 197
/*
 * Enable trace_probe
 * if the file is NULL, enable "perf" handler, or enable "trace" handler.
 *
 * For the trace case a new event_file_link is added (RCU) so the hit
 * handlers can fan out to all attached files.  The underlying k*probe is
 * only armed when it has been registered and its target still exists.
 */
static int
enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
{
	int ret = 0;

	if (file) {
		struct event_file_link *link;

		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			ret = -ENOMEM;
			goto out;
		}

		link->file = file;
		list_add_tail_rcu(&link->list, &tp->files);

		tp->flags |= TP_FLAG_TRACE;
	} else
		tp->flags |= TP_FLAG_PROFILE;

	if (trace_probe_is_registered(tp) && !trace_probe_has_gone(tp)) {
		if (trace_probe_is_return(tp))
			ret = enable_kretprobe(&tp->rp);
		else
			ret = enable_kprobe(&tp->rp.kp);
	}
 out:
	return ret;
}

227 228
/* Find the link entry for @file in tp->files, or NULL if not attached. */
static struct event_file_link *
find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file)
{
	struct event_file_link *link;

	list_for_each_entry(link, &tp->files, list)
		if (link->file == file)
			return link;

	return NULL;
}

/*
 * Disable trace_probe
 * if the file is NULL, disable "perf" handler, or disable "trace" handler.
 *
 * Returns 0 on success, -EINVAL when @file was never attached.  The k*probe
 * itself is only disarmed once no consumer (trace or perf) remains.
 */
static int
disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
{
	struct event_file_link *link = NULL;
	int wait = 0;	/* nonzero when an RCU grace period must elapse */
	int ret = 0;

	if (file) {
		link = find_event_file_link(tp, file);
		if (!link) {
			ret = -EINVAL;
			goto out;
		}

		list_del_rcu(&link->list);
		wait = 1;
		/* Other files still use this event; keep the probe armed. */
		if (!list_empty(&tp->files))
			goto out;

		tp->flags &= ~TP_FLAG_TRACE;
	} else
		tp->flags &= ~TP_FLAG_PROFILE;

	if (!trace_probe_is_enabled(tp) && trace_probe_is_registered(tp)) {
		if (trace_probe_is_return(tp))
			disable_kretprobe(&tp->rp);
		else
			disable_kprobe(&tp->rp.kp);
		wait = 1;
	}
 out:
	if (wait) {
		/*
		 * Synchronize with kprobe_trace_func/kretprobe_trace_func
		 * to ensure disabled (all running handlers are finished).
		 * This is not only for kfree(), but also the caller,
		 * trace_remove_event_call() supposes it for releasing
		 * event_call related objects, which will be accessed in
		 * the kprobe_trace_func/kretprobe_trace_func.
		 */
		synchronize_sched();
		kfree(link);	/* Ignored if link == NULL */
	}

	return ret;
}

290 291
/* Internal register function - just handle k*probes and flags */
static int __register_trace_probe(struct trace_probe *tp)
292
{
293
	int i, ret;
294 295 296 297

	if (trace_probe_is_registered(tp))
		return -EINVAL;

298
	for (i = 0; i < tp->nr_args; i++)
299
		traceprobe_update_arg(&tp->args[i]);
300

301 302 303 304 305 306
	/* Set/clear disabled flag according to tp->flag */
	if (trace_probe_is_enabled(tp))
		tp->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
	else
		tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;

307
	if (trace_probe_is_return(tp))
308
		ret = register_kretprobe(&tp->rp);
309
	else
310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347
		ret = register_kprobe(&tp->rp.kp);

	if (ret == 0)
		tp->flags |= TP_FLAG_REGISTERED;
	else {
		pr_warning("Could not insert probe at %s+%lu: %d\n",
			   trace_probe_symbol(tp), trace_probe_offset(tp), ret);
		if (ret == -ENOENT && trace_probe_is_on_module(tp)) {
			pr_warning("This probe might be able to register after"
				   "target module is loaded. Continue.\n");
			ret = 0;
		} else if (ret == -EILSEQ) {
			pr_warning("Probing address(0x%p) is not an "
				   "instruction boundary.\n",
				   tp->rp.kp.addr);
			ret = -EINVAL;
		}
	}

	return ret;
}

/* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_probe(struct trace_probe *tp)
{
	if (trace_probe_is_registered(tp)) {
		if (trace_probe_is_return(tp))
			unregister_kretprobe(&tp->rp);
		else
			unregister_kprobe(&tp->rp.kp);
		tp->flags &= ~TP_FLAG_REGISTERED;
		/* Cleanup kprobe for reuse */
		if (tp->rp.kp.symbol_name)
			tp->rp.kp.addr = NULL;	/* re-resolved from symbol on re-register */
	}
}

/* Unregister a trace_probe and probe_event: call with locking probe_lock */
static int unregister_trace_probe(struct trace_probe *tp)
{
	/* Enabled event can not be unregistered */
	if (trace_probe_is_enabled(tp))
		return -EBUSY;

	__unregister_trace_probe(tp);
	list_del(&tp->list);
	unregister_probe_event(tp);

	return 0;
}

/*
 * Register a trace_probe and probe_event.
 *
 * An existing probe with the same group/event name is replaced (fails with
 * -EBUSY if that probe is enabled).  The probe_event is registered first;
 * if arming the k*probe then fails, the event is rolled back.
 */
static int register_trace_probe(struct trace_probe *tp)
{
	struct trace_probe *old_tp;
	int ret;

	mutex_lock(&probe_lock);

	/* Delete old (same name) event if exist */
	old_tp = find_trace_probe(tp->call.name, tp->call.class->system);
	if (old_tp) {
		ret = unregister_trace_probe(old_tp);
		if (ret < 0)
			goto end;
		free_trace_probe(old_tp);
	}

	/* Register new event */
	ret = register_probe_event(tp);
	if (ret) {
		pr_warning("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	/* Register k*probe */
	ret = __register_trace_probe(tp);
	if (ret < 0)
		unregister_probe_event(tp);
	else
		list_add_tail(&tp->list, &probe_list);

end:
	mutex_unlock(&probe_lock);
	return ret;
}

397 398 399 400 401 402 403 404 405 406 407 408 409 410 411
/*
 * Module notifier call back, checking event on the module.
 *
 * When a module comes up, any probe whose symbol is scoped to that module
 * ("MOD:sym") is re-registered so it can now resolve.
 */
static int trace_probe_module_callback(struct notifier_block *nb,
				       unsigned long val, void *data)
{
	struct module *mod = data;
	struct trace_probe *tp;
	int ret;

	if (val != MODULE_STATE_COMING)
		return NOTIFY_DONE;

	/* Update probes on coming module */
	mutex_lock(&probe_lock);
	list_for_each_entry(tp, &probe_list, list) {
		if (trace_probe_within_module(tp, mod)) {
			/* Don't need to check busy - this should have gone. */
			__unregister_trace_probe(tp);
			ret = __register_trace_probe(tp);
			if (ret)
				/* Fixed missing space in concatenated literal
				 * (previously printed "on%s"). */
				pr_warning("Failed to re-register probe %s on"
					   " %s: %d\n",
					   tp->call.name, mod->name, ret);
		}
	}
	mutex_unlock(&probe_lock);

	return NOTIFY_DONE;
}

/* Re-registers module-scoped probes when their target module loads. */
static struct notifier_block trace_probe_module_nb = {
	.notifier_call = trace_probe_module_callback,
	.priority = 1	/* Invoked after kprobe module callback */
};

431 432 433 434
/*
 * Parse one kprobe_events command line (already split into argv words)
 * and create, or with '-' delete, the corresponding probe event.
 */
static int create_trace_probe(int argc, char **argv)
{
	/*
	 * Argument syntax:
	 *  - Add kprobe: p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe: r[:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
	 * Fetch args:
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth of stack (N:0-)
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG	: fetch register REG
	 * Dereferencing memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 * Type of args:
	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
	 */
	struct trace_probe *tp;
	int i, ret = 0;
	bool is_return = false, is_delete = false;
	char *symbol = NULL, *event = NULL, *group = NULL;
	char *arg;
	unsigned long offset = 0;
	void *addr = NULL;
	char buf[MAX_EVENT_NAME_LEN];

	/* argc must be >= 1 */
	if (argv[0][0] == 'p')
		is_return = false;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] == '-')
		is_delete = true;
	else {
		pr_info("Probe definition must be started with 'p', 'r' or"
			" '-'.\n");
		return -EINVAL;
	}

	/* Optional ":GRP/EVENT" or ":EVENT" suffix on the command letter. */
	if (argv[0][1] == ':') {
		event = &argv[0][2];
		if (strchr(event, '/')) {
			group = event;
			event = strchr(group, '/') + 1;
			event[-1] = '\0';
			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = KPROBE_EVENT_SYSTEM;

	if (is_delete) {
		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&probe_lock);
		tp = find_trace_probe(event, group);
		if (!tp) {
			mutex_unlock(&probe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_probe(tp);
		if (ret == 0)
			free_trace_probe(tp);
		mutex_unlock(&probe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	if (isdigit(argv[1][0])) {
		if (is_return) {
			pr_info("Return probe point must be a symbol.\n");
			return -EINVAL;
		}
		/* an address specified */
		ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
		if (ret) {
			pr_info("Failed to parse address.\n");
			return ret;
		}
	} else {
		/* a symbol specified */
		symbol = argv[1];
		/* TODO: support .init module functions */
		ret = traceprobe_split_symbol_offset(symbol, &offset);
		if (ret) {
			pr_info("Failed to parse symbol.\n");
			return ret;
		}
		if (offset && is_return) {
			pr_info("Return probe must be used without offset.\n");
			return -EINVAL;
		}
	}
	argc -= 2; argv += 2;	/* remaining words are fetch arguments */

	/* setup a probe */
	if (!event) {
		/* Make a new event name */
		if (symbol)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
				 is_return ? 'r' : 'p', symbol, offset);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
				 is_return ? 'r' : 'p', addr);
		event = buf;
	}
	tp = alloc_trace_probe(group, event, addr, symbol, offset, argc,
			       is_return);
	if (IS_ERR(tp)) {
		pr_info("Failed to allocate trace_probe.(%d)\n",
			(int)PTR_ERR(tp));
		return PTR_ERR(tp);
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		/* Increment count for freeing args in error case */
		tp->nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			tp->args[i].name = kstrdup(buf, GFP_KERNEL);
		}

		if (!tp->args[i].name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(tp->args[i].name)) {
			pr_info("Invalid argument[%d] name: %s\n",
				i, tp->args[i].name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(tp->args[i].name,
							tp->args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tp->size, &tp->args[i],
						is_return, true);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_probe(tp);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_probe(tp);
	return ret;
}

620
/*
 * Remove and free every probe (used by O_TRUNC opens of kprobe_events).
 * Fails with -EBUSY if any probe is still enabled.
 */
static int release_all_trace_probes(void)
{
	struct trace_probe *tp;
	int ret = 0;

	mutex_lock(&probe_lock);
	/* Ensure no probe is in use. */
	list_for_each_entry(tp, &probe_list, list)
		if (trace_probe_is_enabled(tp)) {
			ret = -EBUSY;
			goto end;
		}
	/* TODO: Use batch unregistration */
	while (!list_empty(&probe_list)) {
		tp = list_entry(probe_list.next, struct trace_probe, list);
		unregister_trace_probe(tp);
		free_trace_probe(tp);
	}

end:
	mutex_unlock(&probe_lock);

	return ret;
}

/* Probes listing interfaces */

/* seq_file start: takes probe_lock for the whole iteration. */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&probe_lock);
	return seq_list_start(&probe_list, *pos);
}

/* seq_file next: advance to the following probe in probe_list. */
static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &probe_list, pos);
}

/* seq_file stop: drop the lock taken in probes_seq_start(). */
static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&probe_lock);
}

/*
 * Print one probe definition in the same syntax accepted by
 * create_trace_probe(): "p|r:GRP/EVENT SYMBOL[+offs]|ADDR ARG=FETCH ...".
 */
static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_probe *tp = v;
	int i;

	seq_printf(m, "%c", trace_probe_is_return(tp) ? 'r' : 'p');
	seq_printf(m, ":%s/%s", tp->call.class->system, tp->call.name);

	if (!tp->symbol)
		seq_printf(m, " 0x%p", tp->rp.kp.addr);
	else if (tp->rp.kp.offset)
		seq_printf(m, " %s+%u", trace_probe_symbol(tp),
			   tp->rp.kp.offset);
	else
		seq_printf(m, " %s", trace_probe_symbol(tp));

	for (i = 0; i < tp->nr_args; i++)
		seq_printf(m, " %s=%s", tp->args[i].name, tp->args[i].comm);
	seq_printf(m, "\n");

	return 0;
}

/* seq_file iterator over probe_list (the kprobe_events listing). */
static const struct seq_operations probes_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_seq_show
};

/*
 * Open kprobe_events.  Opening for write with O_TRUNC (e.g. shell
 * "> kprobe_events") first deletes all probes; that fails if any probe
 * is still enabled.
 */
static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = release_all_trace_probes();
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

/* Each written line is handed to create_trace_probe() as one command. */
static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return traceprobe_probes_write(file, buffer, count, ppos,
			create_trace_probe);
}

/* File operations for the (debugfs) kprobe_events control file. */
static const struct file_operations kprobe_events_ops = {
	.owner          = THIS_MODULE,
	.open           = probes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
	.write		= probes_write,
};

721 722 723 724 725 726
/* Probes profiling interfaces */

/* One line per probe: name, hit count, and missed (kretprobe) count. */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_probe *tp = v;

	seq_printf(m, "  %-44s %15lu %15lu\n", tp->call.name, tp->nhit,
		   tp->rp.kp.nmissed);

	return 0;
}

/* Reuses the probe_list iterator, but shows hit/miss statistics. */
static const struct seq_operations profile_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_profile_seq_show
};

/* Open the kprobe_profile statistics file. */
static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

/* File operations for the read-only kprobe_profile file. */
static const struct file_operations kprobe_profile_ops = {
	.owner          = THIS_MODULE,
	.open           = profile_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799
/* Sum up total data length for dynamic arraies (strings) */
static __kprobes int __get_data_size(struct trace_probe *tp,
				     struct pt_regs *regs)
{
	int i, ret = 0;
	u32 len;

	/* Only args with a fetch_size fn (e.g. strings) contribute. */
	for (i = 0; i < tp->nr_args; i++)
		if (unlikely(tp->args[i].fetch_size.fn)) {
			call_fetch(&tp->args[i].fetch_size, regs, &len);
			ret += len;
		}

	return ret;
}

/*
 * Store the value of each argument into the ring-buffer entry.
 *
 * Fixed-size args are fetched straight into their slot; dynamic args
 * (strings) get a relative-location word in their slot and their data
 * appended after the fixed area, consuming @maxlen budget.
 */
static __kprobes void store_trace_args(int ent_size, struct trace_probe *tp,
				       struct pt_regs *regs,
				       u8 *data, int maxlen)
{
	int i;
	u32 end = tp->size;	/* dynamic data starts after the fixed area */
	u32 *dl;	/* Data (relative) location */

	for (i = 0; i < tp->nr_args; i++) {
		if (unlikely(tp->args[i].fetch_size.fn)) {
			/*
			 * First, we set the relative location and
			 * maximum data length to *dl
			 */
			dl = (u32 *)(data + tp->args[i].offset);
			*dl = make_data_rloc(maxlen, end - tp->args[i].offset);
			/* Then try to fetch string or dynamic array data */
			call_fetch(&tp->args[i].fetch, regs, dl);
			/* Reduce maximum length */
			end += get_rloc_len(*dl);
			maxlen -= get_rloc_len(*dl);
			/* Trick here, convert data_rloc to data_loc */
			*dl = convert_rloc_to_loc(*dl,
				 ent_size + tp->args[i].offset);
		} else
			/* Just fetching data normally */
			call_fetch(&tp->args[i].fetch, regs,
				   data + tp->args[i].offset);
	}
}

800
/* Kprobe handler */

/*
 * Record one kprobe hit into @ftrace_file's ring buffer: reserve an entry
 * sized for the fixed fields plus any dynamic (string) data, fill it, and
 * commit unless discarded by the event filter.
 */
static __kprobes void
__kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs,
		    struct ftrace_event_file *ftrace_file)
{
	struct kprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, dsize, pc;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	WARN_ON(call != ftrace_file->event_call);

	/* Soft-disabled files are registered but must not record. */
	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(tp, regs);
	size = sizeof(*entry) + tp->size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->ip = (unsigned long)tp->rp.kp.addr;
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_buffer_unlock_commit_regs(buffer, event,
						irq_flags, pc, regs);
}

838 839 840
/* Dispatch a kprobe hit to every trace file attached to this probe. */
static __kprobes void
kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs)
{
	struct event_file_link *link;

	/* RCU walk; disable_trace_probe() syncs via synchronize_sched(). */
	list_for_each_entry_rcu(link, &tp->files, list)
		__kprobe_trace_func(tp, regs, link->file);
}

847
/* Kretprobe handler */

/*
 * Record one function-return hit into @ftrace_file's ring buffer.
 * Mirrors __kprobe_trace_func() but stores both the probed function
 * address and the return address taken from @ri.
 */
static __kprobes void
__kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
		       struct pt_regs *regs,
		       struct ftrace_event_file *ftrace_file)
{
	struct kretprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, pc, dsize;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	WARN_ON(call != ftrace_file->event_call);

	/* Soft-disabled files are registered but must not record. */
	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(tp, regs);
	size = sizeof(*entry) + tp->size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_buffer_unlock_commit_regs(buffer, event,
						irq_flags, pc, regs);
}

887 888 889 890
/* Dispatch a kretprobe hit to every trace file attached to this probe. */
static __kprobes void
kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
		     struct pt_regs *regs)
{
	struct event_file_link *link;

	/* RCU walk; disable_trace_probe() syncs via synchronize_sched(). */
	list_for_each_entry_rcu(link, &tp->files, list)
		__kretprobe_trace_func(tp, ri, regs, link->file);
}

897
/* Event entry printers */

/*
 * Render a kprobe event entry as "NAME: (ip) arg1=... arg2=...".
 * Returns TRACE_TYPE_PARTIAL_LINE if the seq buffer fills mid-way.
 */
static enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct kprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	/* Argument payload starts right after the fixed header. */
	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

934
/*
 * Render a kretprobe event entry as "NAME: (ret_ip <- func) arg1=...".
 * Returns TRACE_TYPE_PARTIAL_LINE if the seq buffer fills mid-way.
 */
static enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags,
		      struct trace_event *event)
{
	struct kretprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kretprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, " <- "))
		goto partial;

	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	/* Argument payload starts right after the fixed header. */
	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}


/* Define the event fields (ip plus one field per fetch arg) for a kprobe. */
static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kprobe_trace_entry_head field;
	struct trace_probe *tp = (struct trace_probe *)event_call->data;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tp->nr_args; i++) {
		ret = trace_define_field(event_call, tp->args[i].type->fmttype,
					 tp->args[i].name,
					 sizeof(field) + tp->args[i].offset,
					 tp->args[i].type->size,
					 tp->args[i].type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

/* Define the event fields (func, ret_ip plus fetch args) for a kretprobe. */
static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kretprobe_trace_entry_head field;
	struct trace_probe *tp = (struct trace_probe *)event_call->data;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tp->nr_args; i++) {
		ret = trace_define_field(event_call, tp->args[i].type->fmttype,
					 tp->args[i].name,
					 sizeof(field) + tp->args[i].offset,
					 tp->args[i].type->size,
					 tp->args[i].type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

1020 1021 1022 1023 1024 1025 1026
/*
 * Build the event's print_fmt string into @buf (at most @len bytes).
 * With len == 0 nothing is written and only the required length is
 * computed; returns the length either way (snprintf convention).
 */
static int __set_print_fmt(struct trace_probe *tp, char *buf, int len)
{
	int i;
	int pos = 0;

	const char *fmt, *arg;

	if (!trace_probe_is_return(tp)) {
		fmt = "(%lx)";
		arg = "REC->" FIELD_STRING_IP;
	} else {
		fmt = "(%lx <- %lx)";
		arg = "REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP;
	}

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt);

	for (i = 0; i < tp->nr_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%s",
				tp->args[i].name, tp->args[i].type->fmt);
	}

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg);

	for (i = 0; i < tp->nr_args; i++) {
		/* Strings are referenced via __get_str(), not directly. */
		if (strcmp(tp->args[i].type->name, "string") == 0)
			pos += snprintf(buf + pos, LEN_OR_ZERO,
					", __get_str(%s)",
					tp->args[i].name);
		else
			pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s",
					tp->args[i].name);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}

/*
 * Allocate and install tp->call.print_fmt using the two-pass (measure,
 * then format) idiom.  Returns 0 or -ENOMEM.
 */
static int set_print_fmt(struct trace_probe *tp)
{
	int len;
	char *print_fmt;

	/* First: called with 0 length to calculate the needed length */
	len = __set_print_fmt(tp, NULL, 0);
	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_print_fmt(tp, print_fmt, len + 1);
	tp->call.print_fmt = print_fmt;

	return 0;
}

1081
#ifdef CONFIG_PERF_EVENTS
1082 1083

/* Kprobe profile handler */

/*
 * Record a kprobe hit into the perf buffer.  Skips early when no perf
 * event is attached on this CPU.  The payload is u64-aligned per the perf
 * ABI (the u32 size prefix is accounted for, then subtracted back).
 */
static __kprobes void
kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs)
{
	struct ftrace_event_call *call = &tp->call;
	struct kprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(tp, regs);
	__size = sizeof(*entry) + tp->size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
	if (!entry)
		return;

	entry->ip = (unsigned long)tp->rp.kp.addr;
	/* Zero the dynamic area so alignment padding never leaks stack. */
	memset(&entry[1], 0, dsize);
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
	perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
}

/* Kretprobe profile handler */

/*
 * Record a function-return hit into the perf buffer.  Mirrors
 * kprobe_perf_func() but stores func and ret_ip from @ri.
 */
static __kprobes void
kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri,
		    struct pt_regs *regs)
{
	struct ftrace_event_call *call = &tp->call;
	struct kretprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(tp, regs);
	__size = sizeof(*entry) + tp->size + dsize;
	/* u64-align the payload per the perf ABI. */
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
	if (!entry)
		return;

	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
	perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
}
1141
#endif	/* CONFIG_PERF_EVENTS */
1142

1143 1144 1145 1146 1147 1148
/*
 * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
 *
 * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
 * lockless, but we can't race with this __init function.
 */
1149
static __kprobes
1150 1151
int kprobe_register(struct ftrace_event_call *event,
		    enum trace_reg type, void *data)
1152
{
1153
	struct trace_probe *tp = (struct trace_probe *)event->data;
1154
	struct ftrace_event_file *file = data;
1155

1156 1157
	switch (type) {
	case TRACE_REG_REGISTER:
1158
		return enable_trace_probe(tp, file);
1159
	case TRACE_REG_UNREGISTER:
1160
		return disable_trace_probe(tp, file);
1161 1162 1163

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
1164
		return enable_trace_probe(tp, NULL);
1165
	case TRACE_REG_PERF_UNREGISTER:
1166
		return disable_trace_probe(tp, NULL);
1167 1168
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
1169 1170
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
1171
		return 0;
1172 1173 1174 1175
#endif
	}
	return 0;
}
1176 1177 1178 1179 1180

static __kprobes
int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
1181

1182 1183
	tp->nhit++;

1184
	if (tp->flags & TP_FLAG_TRACE)
1185
		kprobe_trace_func(tp, regs);
1186
#ifdef CONFIG_PERF_EVENTS
1187
	if (tp->flags & TP_FLAG_PROFILE)
1188
		kprobe_perf_func(tp, regs);
1189
#endif
1190 1191 1192 1193 1194 1195 1196 1197
	return 0;	/* We don't tweek kernel, so just return 0 */
}

static __kprobes
int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);

1198 1199
	tp->nhit++;

1200
	if (tp->flags & TP_FLAG_TRACE)
1201
		kretprobe_trace_func(tp, ri, regs);
1202
#ifdef CONFIG_PERF_EVENTS
1203
	if (tp->flags & TP_FLAG_PROFILE)
1204
		kretprobe_perf_func(tp, ri, regs);
1205
#endif
1206 1207
	return 0;	/* We don't tweek kernel, so just return 0 */
}
1208

1209 1210 1211 1212 1213 1214 1215 1216
static struct trace_event_functions kretprobe_funcs = {
	.trace		= print_kretprobe_event
};

static struct trace_event_functions kprobe_funcs = {
	.trace		= print_kprobe_event
};

1217 1218 1219 1220 1221 1222
static int register_probe_event(struct trace_probe *tp)
{
	struct ftrace_event_call *call = &tp->call;
	int ret;

	/* Initialize ftrace_event_call */
1223
	INIT_LIST_HEAD(&call->class->fields);
1224
	if (trace_probe_is_return(tp)) {
1225
		call->event.funcs = &kretprobe_funcs;
1226
		call->class->define_fields = kretprobe_event_define_fields;
1227
	} else {
1228
		call->event.funcs = &kprobe_funcs;
1229
		call->class->define_fields = kprobe_event_define_fields;
1230
	}
1231 1232
	if (set_print_fmt(tp) < 0)
		return -ENOMEM;
1233 1234
	ret = register_ftrace_event(&call->event);
	if (!ret) {
1235
		kfree(call->print_fmt);
1236
		return -ENODEV;
1237
	}
1238
	call->flags = 0;
1239
	call->class->reg = kprobe_register;
1240 1241
	call->data = tp;
	ret = trace_add_event_call(call);
1242
	if (ret) {
1243
		pr_info("Failed to register kprobe event: %s\n", call->name);
1244
		kfree(call->print_fmt);
1245
		unregister_ftrace_event(&call->event);
1246
	}
1247 1248 1249 1250 1251
	return ret;
}

static void unregister_probe_event(struct trace_probe *tp)
{
1252
	/* tp->event is unregistered in trace_remove_event_call() */
1253
	trace_remove_event_call(&tp->call);
1254
	kfree(tp->call.print_fmt);
1255 1256
}

L
Lucas De Marchi 已提交
1257
/* Make a debugfs interface for controlling probe points */
1258 1259 1260 1261 1262
static __init int init_kprobe_trace(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

1263 1264 1265
	if (register_module_notifier(&trace_probe_module_nb))
		return -EINVAL;

1266 1267 1268 1269 1270 1271 1272
	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
				    NULL, &kprobe_events_ops);

1273
	/* Event list interface */
1274 1275 1276
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_events' entry\n");
1277 1278 1279 1280 1281 1282 1283 1284

	/* Profile interface */
	entry = debugfs_create_file("kprobe_profile", 0444, d_tracer,
				    NULL, &kprobe_profile_ops);

	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_profile' entry\n");
1285 1286 1287 1288 1289 1290 1291
	return 0;
}
fs_initcall(init_kprobe_trace);


#ifdef CONFIG_FTRACE_STARTUP_TEST

1292 1293 1294 1295 1296 1297
/*
 * The "__used" keeps gcc from removing the function symbol
 * from the kallsyms table.
 */
static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
					       int a4, int a5, int a6)
1298 1299 1300 1301
{
	return a1 + a2 + a3 + a4 + a5 + a6;
}

1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313
static struct ftrace_event_file *
find_trace_probe_file(struct trace_probe *tp, struct trace_array *tr)
{
	struct ftrace_event_file *file;

	list_for_each_entry(file, &tr->events, list)
		if (file->event_call == &tp->call)
			return file;

	return NULL;
}

1314 1315 1316 1317
/*
 * Nobody but us can call enable_trace_probe/disable_trace_probe at this
 * stage, we can do this lockless.
 */
1318 1319
static __init int kprobe_trace_self_tests_init(void)
{
1320
	int ret, warn = 0;
1321
	int (*target)(int, int, int, int, int, int);
1322
	struct trace_probe *tp;
1323
	struct ftrace_event_file *file;
1324 1325 1326 1327 1328

	target = kprobe_trace_selftest_target;

	pr_info("Testing kprobe tracing: ");

1329 1330 1331
	ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target "
				  "$stack $stack0 +0($stack)",
				  create_trace_probe);
1332
	if (WARN_ON_ONCE(ret)) {
1333
		pr_warn("error on probing function entry.\n");
1334 1335 1336
		warn++;
	} else {
		/* Enable trace point */
1337
		tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
1338
		if (WARN_ON_ONCE(tp == NULL)) {
1339
			pr_warn("error on getting new probe.\n");
1340
			warn++;
1341 1342 1343 1344 1345 1346 1347 1348
		} else {
			file = find_trace_probe_file(tp, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_probe(tp, file);
		}
1349
	}
1350

1351 1352
	ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
				  "$retval", create_trace_probe);
1353
	if (WARN_ON_ONCE(ret)) {
1354
		pr_warn("error on probing function return.\n");
1355 1356 1357
		warn++;
	} else {
		/* Enable trace point */
1358
		tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
1359
		if (WARN_ON_ONCE(tp == NULL)) {
1360
			pr_warn("error on getting 2nd new probe.\n");
1361
			warn++;
1362 1363 1364 1365 1366 1367 1368 1369
		} else {
			file = find_trace_probe_file(tp, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_probe(tp, file);
		}
1370 1371 1372 1373
	}

	if (warn)
		goto end;
1374 1375 1376

	ret = target(1, 2, 3, 4, 5, 6);

1377 1378 1379
	/* Disable trace points before removing it */
	tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tp == NULL)) {
1380
		pr_warn("error on getting test probe.\n");
1381
		warn++;
1382 1383 1384 1385 1386 1387 1388 1389
	} else {
		file = find_trace_probe_file(tp, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_probe(tp, file);
	}
1390 1391 1392

	tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tp == NULL)) {
1393
		pr_warn("error on getting 2nd test probe.\n");
1394
		warn++;
1395 1396 1397 1398 1399 1400 1401 1402
	} else {
		file = find_trace_probe_file(tp, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_probe(tp, file);
	}
1403

1404
	ret = traceprobe_command("-:testprobe", create_trace_probe);
1405
	if (WARN_ON_ONCE(ret)) {
1406
		pr_warn("error on deleting a probe.\n");
1407 1408 1409
		warn++;
	}

1410
	ret = traceprobe_command("-:testprobe2", create_trace_probe);
1411
	if (WARN_ON_ONCE(ret)) {
1412
		pr_warn("error on deleting a probe.\n");
1413 1414
		warn++;
	}
1415

1416
end:
1417
	release_all_trace_probes();
1418 1419 1420 1421
	if (warn)
		pr_cont("NG: Some tests are failed. Please check them.\n");
	else
		pr_cont("OK\n");
1422 1423 1424 1425 1426 1427
	return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif