/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/module.h>
#include <linux/uaccess.h>

#include "trace_probe.h"

/* Default event group when the user does not give "GRP/" in a definition */
#define KPROBE_EVENT_SYSTEM "kprobes"

/**
 * Kprobe event core functions
 */
/*
 * Per-event bookkeeping: a kretprobe (whose rp.kp member doubles as the
 * plain kprobe) plus the generic trace_probe data shared with uprobe events.
 */
struct trace_kprobe {
	struct list_head	list;		/* link in probe_list */
	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
	unsigned long		nhit;		/* hit count, bumped by dispatchers */
	const char		*symbol;	/* symbol name */
	struct trace_probe	tp;	/* must stay last: flexible tp.args[] */
};

38 39 40 41 42
struct event_file_link {
	struct ftrace_event_file	*file;
	struct list_head		list;
};

/* Allocation size for a trace_kprobe carrying n probe_arg slots in tp.args[] */
#define SIZEOF_TRACE_KPROBE(n)				\
	(offsetof(struct trace_kprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))
46

47

/* A return (kretprobe) event is one with a kretprobe handler installed */
static __kprobes bool trace_kprobe_is_return(struct trace_kprobe *tk)
{
	return tk->rp.handler != NULL;
}

/* Probed symbol name, or "unknown" when probing a raw address */
static __kprobes const char *trace_kprobe_symbol(struct trace_kprobe *tk)
{
	return tk->symbol ? tk->symbol : "unknown";
}

/* Offset from the probed symbol */
static __kprobes unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
{
	return tk->rp.kp.offset;
}

/* True if the probed location has gone away (e.g. target module unloaded) */
static __kprobes bool trace_kprobe_has_gone(struct trace_kprobe *tk)
{
	return !!(kprobe_gone(&tk->rp.kp));
}

68 69
static __kprobes bool trace_kprobe_within_module(struct trace_kprobe *tk,
						 struct module *mod)
70 71
{
	int len = strlen(mod->name);
72
	const char *name = trace_kprobe_symbol(tk);
73 74 75
	return strncmp(mod->name, name, len) == 0 && name[len] == ':';
}

/* Symbol contains ':' => probe lives in a module ("MOD:symbol") */
static __kprobes bool trace_kprobe_is_on_module(struct trace_kprobe *tk)
{
	return !!strchr(trace_kprobe_symbol(tk), ':');
}

static int register_kprobe_event(struct trace_kprobe *tk);
static int unregister_kprobe_event(struct trace_kprobe *tk);

/* probe_lock serializes probe_list and all (un)registration paths */
static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
				struct pt_regs *regs);
/*
 * Allocate new trace_probe and initialize it (including kprobes).
 * Returns the new trace_kprobe or an ERR_PTR (-ENOMEM/-EINVAL); on error
 * all partially-allocated strings are released before returning.
 */
static struct trace_kprobe *alloc_trace_kprobe(const char *group,
					     const char *event,
					     void *addr,
					     const char *symbol,
					     unsigned long offs,
					     int nargs, bool is_return)
{
	struct trace_kprobe *tk;
	int ret = -ENOMEM;	/* default error unless overridden below */

	tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
	if (!tk)
		return ERR_PTR(ret);

	/* Either a symbol(+offset) or a raw address identifies the probe */
	if (symbol) {
		tk->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tk->symbol)
			goto error;
		tk->rp.kp.symbol_name = tk->symbol;
		tk->rp.kp.offset = offs;
	} else
		tk->rp.kp.addr = addr;

	if (is_return)
		tk->rp.handler = kretprobe_dispatcher;
	else
		tk->rp.kp.pre_handler = kprobe_dispatcher;

	if (!event || !is_good_name(event)) {
		ret = -EINVAL;
		goto error;
	}

	tk->tp.call.class = &tk->tp.class;
	tk->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tk->tp.call.name)
		goto error;

	if (!group || !is_good_name(group)) {
		ret = -EINVAL;
		goto error;
	}

	tk->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tk->tp.class.system)
		goto error;

	INIT_LIST_HEAD(&tk->list);
	INIT_LIST_HEAD(&tk->tp.files);
	return tk;
error:
	/* kfree(NULL) is a no-op, so unwinding partial allocations is safe */
	kfree(tk->tp.call.name);
	kfree(tk->symbol);
	kfree(tk);
	return ERR_PTR(ret);
}

/* Release a trace_kprobe and everything it owns (args, names, symbol) */
static void free_trace_kprobe(struct trace_kprobe *tk)
{
	int i;

	for (i = 0; i < tk->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tk->tp.args[i]);

	kfree(tk->tp.call.class->system);
	kfree(tk->tp.call.name);
	kfree(tk->symbol);
	kfree(tk);
}

164 165
static struct trace_kprobe *find_trace_kprobe(const char *event,
					      const char *group)
166
{
167
	struct trace_kprobe *tk;
168

169 170 171 172
	list_for_each_entry(tk, &probe_list, list)
		if (strcmp(tk->tp.call.name, event) == 0 &&
		    strcmp(tk->tp.call.class->system, group) == 0)
			return tk;
173 174 175
	return NULL;
}

/*
 * Enable trace_probe
 * if the file is NULL, enable "perf" handler, or enable "trace" handler.
 */
static int
enable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file)
{
	int ret = 0;

	if (file) {
		struct event_file_link *link;

		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			ret = -ENOMEM;
			goto out;
		}

		link->file = file;
		/* RCU add: readers are the handlers' lockless list walks */
		list_add_tail_rcu(&link->list, &tk->tp.files);

		tk->tp.flags |= TP_FLAG_TRACE;
	} else
		tk->tp.flags |= TP_FLAG_PROFILE;

	/* Arm the k*probe only if it is registered and its target exists */
	if (trace_probe_is_registered(&tk->tp) && !trace_kprobe_has_gone(tk)) {
		if (trace_kprobe_is_return(tk))
			ret = enable_kretprobe(&tk->rp);
		else
			ret = enable_kprobe(&tk->rp.kp);
	}
 out:
	return ret;
}

211 212
static struct event_file_link *
find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file)
213
{
214
	struct event_file_link *link;
215

216 217 218
	list_for_each_entry(link, &tp->files, list)
		if (link->file == file)
			return link;
219

220
	return NULL;
221 222 223 224 225 226 227
}

/*
 * Disable trace_probe
 * if the file is NULL, disable "perf" handler, or disable "trace" handler.
 */
static int
disable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file)
{
	struct event_file_link *link = NULL;
	int wait = 0;	/* set when RCU readers may still see stale state */
	int ret = 0;

	if (file) {
		link = find_event_file_link(&tk->tp, file);
		if (!link) {
			ret = -EINVAL;
			goto out;
		}

		list_del_rcu(&link->list);
		wait = 1;
		/* Other instance files still use this event: keep it armed */
		if (!list_empty(&tk->tp.files))
			goto out;

		tk->tp.flags &= ~TP_FLAG_TRACE;
	} else
		tk->tp.flags &= ~TP_FLAG_PROFILE;

	/* Disarm only once neither tracing nor perf needs the probe */
	if (!trace_probe_is_enabled(&tk->tp) && trace_probe_is_registered(&tk->tp)) {
		if (trace_kprobe_is_return(tk))
			disable_kretprobe(&tk->rp);
		else
			disable_kprobe(&tk->rp.kp);
		wait = 1;
	}
 out:
	if (wait) {
		/*
		 * Synchronize with kprobe_trace_func/kretprobe_trace_func
		 * to ensure disabled (all running handlers are finished).
		 * This is not only for kfree(), but also the caller,
		 * trace_remove_event_call() supposes it for releasing
		 * event_call related objects, which will be accessed in
		 * the kprobe_trace_func/kretprobe_trace_func.
		 */
		synchronize_sched();
		kfree(link);	/* Ignored if link == NULL */
	}

	return ret;
}

274
/* Internal register function - just handle k*probes and flags */
275
static int __register_trace_kprobe(struct trace_kprobe *tk)
276
{
277
	int i, ret;
278

279
	if (trace_probe_is_registered(&tk->tp))
280 281
		return -EINVAL;

282 283
	for (i = 0; i < tk->tp.nr_args; i++)
		traceprobe_update_arg(&tk->tp.args[i]);
284

285
	/* Set/clear disabled flag according to tp->flag */
286 287
	if (trace_probe_is_enabled(&tk->tp))
		tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
288
	else
289
		tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;
290

291 292
	if (trace_kprobe_is_return(tk))
		ret = register_kretprobe(&tk->rp);
293
	else
294
		ret = register_kprobe(&tk->rp.kp);
295 296

	if (ret == 0)
297
		tk->tp.flags |= TP_FLAG_REGISTERED;
298 299
	else {
		pr_warning("Could not insert probe at %s+%lu: %d\n",
300 301
			   trace_kprobe_symbol(tk), trace_kprobe_offset(tk), ret);
		if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) {
302 303 304 305 306 307
			pr_warning("This probe might be able to register after"
				   "target module is loaded. Continue.\n");
			ret = 0;
		} else if (ret == -EILSEQ) {
			pr_warning("Probing address(0x%p) is not an "
				   "instruction boundary.\n",
308
				   tk->rp.kp.addr);
309 310 311 312 313 314 315 316
			ret = -EINVAL;
		}
	}

	return ret;
}

/* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_kprobe(struct trace_kprobe *tk)
{
	if (trace_probe_is_registered(&tk->tp)) {
		if (trace_kprobe_is_return(tk))
			unregister_kretprobe(&tk->rp);
		else
			unregister_kprobe(&tk->rp.kp);
		tk->tp.flags &= ~TP_FLAG_REGISTERED;
		/* Cleanup kprobe for reuse */
		if (tk->rp.kp.symbol_name)
			tk->rp.kp.addr = NULL;	/* force symbol re-resolution */
	}
}

/* Unregister a trace_probe and probe_event: call with locking probe_lock */
static int unregister_trace_kprobe(struct trace_kprobe *tk)
{
	/* Enabled event can not be unregistered */
	if (trace_probe_is_enabled(&tk->tp))
		return -EBUSY;

	/* Will fail if probe is being used by ftrace or perf */
	if (unregister_kprobe_event(tk))
		return -EBUSY;

	__unregister_trace_kprobe(tk);
	list_del(&tk->list);

	return 0;
}

/* Register a trace_probe and probe_event */
static int register_trace_kprobe(struct trace_kprobe *tk)
{
	struct trace_kprobe *old_tk;
	int ret;

	mutex_lock(&probe_lock);

	/* Delete old (same name) event if exist */
	old_tk = find_trace_kprobe(tk->tp.call.name, tk->tp.call.class->system);
	if (old_tk) {
		ret = unregister_trace_kprobe(old_tk);
		if (ret < 0)
			goto end;	/* old event busy: refuse to replace */
		free_trace_kprobe(old_tk);
	}

	/* Register new event */
	ret = register_kprobe_event(tk);
	if (ret) {
		pr_warning("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	/* Register k*probe */
	ret = __register_trace_kprobe(tk);
	if (ret < 0)
		unregister_kprobe_event(tk);	/* roll back the event */
	else
		list_add_tail(&tk->list, &probe_list);

end:
	mutex_unlock(&probe_lock);
	return ret;
}

384
/* Module notifier call back, checking event on the module */
385
static int trace_kprobe_module_callback(struct notifier_block *nb,
386 387 388
				       unsigned long val, void *data)
{
	struct module *mod = data;
389
	struct trace_kprobe *tk;
390 391 392 393 394 395 396
	int ret;

	if (val != MODULE_STATE_COMING)
		return NOTIFY_DONE;

	/* Update probes on coming module */
	mutex_lock(&probe_lock);
397 398
	list_for_each_entry(tk, &probe_list, list) {
		if (trace_kprobe_within_module(tk, mod)) {
399
			/* Don't need to check busy - this should have gone. */
400 401
			__unregister_trace_kprobe(tk);
			ret = __register_trace_kprobe(tk);
402 403 404
			if (ret)
				pr_warning("Failed to re-register probe %s on"
					   "%s: %d\n",
405
					   tk->tp.call.name, mod->name, ret);
406 407 408 409 410 411 412
		}
	}
	mutex_unlock(&probe_lock);

	return NOTIFY_DONE;
}

static struct notifier_block trace_kprobe_module_nb = {
	.notifier_call = trace_kprobe_module_callback,
	.priority = 1	/* Invoked after kprobe module callback */
};

/* Parse one "kprobe_events" command line and create/delete the probe */
static int create_trace_kprobe(int argc, char **argv)
{
	/*
	 * Argument syntax:
	 *  - Add kprobe: p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe: r[:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
	 * Fetch args:
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth of stack (N:0-)
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG	: fetch register REG
	 * Dereferencing memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 * Type of args:
	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
	 */
	struct trace_kprobe *tk;
	int i, ret = 0;
	bool is_return = false, is_delete = false;
	char *symbol = NULL, *event = NULL, *group = NULL;
	char *arg;
	unsigned long offset = 0;
	void *addr = NULL;
	char buf[MAX_EVENT_NAME_LEN];

	/* argc must be >= 1 */
	if (argv[0][0] == 'p')
		is_return = false;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] == '-')
		is_delete = true;
	else {
		pr_info("Probe definition must be started with 'p', 'r' or"
			" '-'.\n");
		return -EINVAL;
	}

	/* Optional ":GRP/EVENT" or ":EVENT" suffix on the command letter */
	if (argv[0][1] == ':') {
		event = &argv[0][2];
		if (strchr(event, '/')) {
			group = event;
			event = strchr(group, '/') + 1;
			event[-1] = '\0';	/* split GRP and EVENT in place */
			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = KPROBE_EVENT_SYSTEM;

	if (is_delete) {
		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&probe_lock);
		tk = find_trace_kprobe(event, group);
		if (!tk) {
			mutex_unlock(&probe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_kprobe(tk);
		if (ret == 0)
			free_trace_kprobe(tk);
		mutex_unlock(&probe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	if (isdigit(argv[1][0])) {
		if (is_return) {
			pr_info("Return probe point must be a symbol.\n");
			return -EINVAL;
		}
		/* an address specified */
		ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
		if (ret) {
			pr_info("Failed to parse address.\n");
			return ret;
		}
	} else {
		/* a symbol specified */
		symbol = argv[1];
		/* TODO: support .init module functions */
		ret = traceprobe_split_symbol_offset(symbol, &offset);
		if (ret) {
			pr_info("Failed to parse symbol.\n");
			return ret;
		}
		if (offset && is_return) {
			pr_info("Return probe must be used without offset.\n");
			return -EINVAL;
		}
	}
	argc -= 2; argv += 2;	/* remaining argv[] are fetch args */

	/* setup a probe */
	if (!event) {
		/* Make a new event name */
		if (symbol)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
				 is_return ? 'r' : 'p', symbol, offset);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
				 is_return ? 'r' : 'p', addr);
		event = buf;
	}
	tk = alloc_trace_kprobe(group, event, addr, symbol, offset, argc,
			       is_return);
	if (IS_ERR(tk)) {
		pr_info("Failed to allocate trace_probe.(%d)\n",
			(int)PTR_ERR(tk));
		return PTR_ERR(tk);
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		/* Increment count for freeing args in error case */
		tk->tp.nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			parg->name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			parg->name = kstrdup(buf, GFP_KERNEL);
		}

		if (!parg->name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(parg->name)) {
			pr_info("Invalid argument[%d] name: %s\n",
				i, parg->name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(parg->name,
							tk->tp.args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg,
						is_return, true);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_kprobe(tk);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_kprobe(tk);	/* frees the nr_args parsed so far too */
	return ret;
}

/* Remove every probe; refuses (-EBUSY) if any event is currently enabled */
static int release_all_trace_kprobes(void)
{
	struct trace_kprobe *tk;
	int ret = 0;

	mutex_lock(&probe_lock);
	/* Ensure no probe is in use. */
	list_for_each_entry(tk, &probe_list, list)
		if (trace_probe_is_enabled(&tk->tp)) {
			ret = -EBUSY;
			goto end;
		}
	/* TODO: Use batch unregistration */
	while (!list_empty(&probe_list)) {
		tk = list_entry(probe_list.next, struct trace_kprobe, list);
		ret = unregister_trace_kprobe(tk);
		if (ret)
			goto end;
		free_trace_kprobe(tk);
	}

end:
	mutex_unlock(&probe_lock);

	return ret;
}

/* Probes listing interfaces */
/* probe_lock is held from start to stop across the whole traversal */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&probe_lock);
	return seq_list_start(&probe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &probe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&probe_lock);
}

/* Print one probe definition in the same syntax create_trace_kprobe parses */
static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_kprobe *tk = v;
	int i;

	seq_printf(m, "%c", trace_kprobe_is_return(tk) ? 'r' : 'p');
	seq_printf(m, ":%s/%s", tk->tp.call.class->system, tk->tp.call.name);

	if (!tk->symbol)
		seq_printf(m, " 0x%p", tk->rp.kp.addr);
	else if (tk->rp.kp.offset)
		seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
			   tk->rp.kp.offset);
	else
		seq_printf(m, " %s", trace_kprobe_symbol(tk));

	for (i = 0; i < tk->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
	seq_printf(m, "\n");

	return 0;
}

static const struct seq_operations probes_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
685 686 687
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
688
		ret = release_all_trace_kprobes();
689 690 691
		if (ret < 0)
			return ret;
	}
692 693 694 695 696 697 698

	return seq_open(file, &probes_seq_op);
}

/* Feed each written command line to create_trace_kprobe() */
static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return traceprobe_probes_write(file, buffer, count, ppos,
			create_trace_kprobe);
}

/* fops for the "kprobe_events" control file */
static const struct file_operations kprobe_events_ops = {
	.owner          = THIS_MODULE,
	.open           = probes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
/* One line per probe: event name, hit count, kretprobe miss count */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_kprobe *tk = v;

	seq_printf(m, "  %-44s %15lu %15lu\n", tk->tp.call.name, tk->nhit,
		   tk->rp.kp.nmissed);

	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

/* fops for the read-only "kprobe_profile" statistics file */
static const struct file_operations kprobe_profile_ops = {
	.owner          = THIS_MODULE,
	.open           = profile_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

/* Kprobe handler */
/* Record one hit into @ftrace_file's ring buffer */
static __kprobes void
__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
		    struct ftrace_event_file *ftrace_file)
{
	struct kprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, dsize, pc;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tk->tp.call;

	WARN_ON(call != ftrace_file->event_call);

	/* Soft-disabled files are enabled but must not record */
	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	/* dsize covers dynamic (string) data beyond the fixed arg area */
	dsize = __get_data_size(&tk->tp, regs);
	size = sizeof(*entry) + tk->tp.size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->ip = (unsigned long)tk->rp.kp.addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);

	if (!filter_check_discard(ftrace_file, entry, buffer, event))
		trace_buffer_unlock_commit_regs(buffer, event,
						irq_flags, pc, regs);
}

781
static __kprobes void
782
kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
783
{
784
	struct event_file_link *link;
785

786 787
	list_for_each_entry_rcu(link, &tk->tp.files, list)
		__kprobe_trace_func(tk, regs, link->file);
788 789
}

/* Kretprobe handler */
/* Record one function return into @ftrace_file's ring buffer */
static __kprobes void
__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		       struct pt_regs *regs,
		       struct ftrace_event_file *ftrace_file)
{
	struct kretprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, pc, dsize;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tk->tp.call;

	WARN_ON(call != ftrace_file->event_call);

	/* Soft-disabled files are enabled but must not record */
	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	/* dsize covers dynamic (string) data beyond the fixed arg area */
	dsize = __get_data_size(&tk->tp, regs);
	size = sizeof(*entry) + tk->tp.size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);

	if (!filter_check_discard(ftrace_file, entry, buffer, event))
		trace_buffer_unlock_commit_regs(buffer, event,
						irq_flags, pc, regs);
}

830
static __kprobes void
831
kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
832 833
		     struct pt_regs *regs)
{
834
	struct event_file_link *link;
835

836 837
	list_for_each_entry_rcu(link, &tk->tp.files, list)
		__kretprobe_trace_func(tk, ri, regs, link->file);
838 839
}

/* Event entry printers */
/* Format: "EVENT: (IP+OFF) arg1=... arg2=..." */
static enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct kprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	/* args are stored right after the fixed header */
	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

/* Format: "EVENT: (RET_IP+OFF <- FUNC) arg1=... arg2=..." */
static enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags,
		      struct trace_event *event)
{
	struct kretprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kretprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, " <- "))
		goto partial;

	/* no offset for the probed function itself */
	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	/* args are stored right after the fixed header */
	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}


static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
923
	struct kprobe_trace_entry_head field;
924
	struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;
925

926
	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
927
	/* Set argument names as fields */
928 929 930 931 932 933 934 935
	for (i = 0; i < tk->tp.nr_args; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name,
					 sizeof(field) + parg->offset,
					 parg->type->size,
					 parg->type->is_signed,
936 937 938 939
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
940 941 942 943 944 945
	return 0;
}

/* Describe the kretprobe entry layout (func + ret_ip + args) to the filter */
static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kretprobe_trace_entry_head field;
	struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tk->tp.nr_args; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name,
					 sizeof(field) + parg->offset,
					 parg->type->size,
					 parg->type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * Build the event's print_fmt string into @buf (up to @len bytes).
 * Called once with len=0 to size the buffer, then again to fill it.
 * Returns the length the full format requires.
 */
static int __set_print_fmt(struct trace_kprobe *tk, char *buf, int len)
{
	int i;
	int pos = 0;

	const char *fmt, *arg;

	if (!trace_kprobe_is_return(tk)) {
		fmt = "(%lx)";
		arg = "REC->" FIELD_STRING_IP;
	} else {
		fmt = "(%lx <- %lx)";
		arg = "REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP;
	}

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt);

	for (i = 0; i < tk->tp.nr_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%s",
				tk->tp.args[i].name, tk->tp.args[i].type->fmt);
	}

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg);

	for (i = 0; i < tk->tp.nr_args; i++) {
		/* string args are fetched via __get_str, others via REC-> */
		if (strcmp(tk->tp.args[i].type->name, "string") == 0)
			pos += snprintf(buf + pos, LEN_OR_ZERO,
					", __get_str(%s)",
					tk->tp.args[i].name);
		else
			pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s",
					tk->tp.args[i].name);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}

/* Allocate and install the print_fmt string for this event */
static int set_print_fmt(struct trace_kprobe *tk)
{
	int len;
	char *print_fmt;

	/* First: called with 0 length to calculate the needed length */
	len = __set_print_fmt(tk, NULL, 0);
	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_print_fmt(tk, print_fmt, len + 1);
	tk->tp.call.print_fmt = print_fmt;

	return 0;
}

#ifdef CONFIG_PERF_EVENTS

/* Kprobe profile handler */
/* Record one kprobe hit into the per-cpu perf buffer */
static __kprobes void
kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct ftrace_event_call *call = &tk->tp.call;
	struct kprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
	/* perf requires u64 alignment minus the u32 size header */
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
	if (!entry)
		return;

	entry->ip = (unsigned long)tk->rp.kp.addr;
	memset(&entry[1], 0, dsize);	/* zero the dynamic area first */
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
	perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
}

/* Kretprobe profile handler */
/* Record one function return into the per-cpu perf buffer */
static __kprobes void
kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		    struct pt_regs *regs)
{
	struct ftrace_event_call *call = &tk->tp.call;
	struct kretprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
	/* perf requires u64 alignment minus the u32 size header */
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
	if (!entry)
		return;

	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
	perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
}
#endif	/* CONFIG_PERF_EVENTS */
1089

1090 1091 1092 1093 1094 1095
/*
 * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
 *
 * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
 * lockless, but we can't race with this __init function.
 */
1096
static __kprobes
1097 1098
int kprobe_register(struct ftrace_event_call *event,
		    enum trace_reg type, void *data)
1099
{
1100
	struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
1101
	struct ftrace_event_file *file = data;
1102

1103 1104
	switch (type) {
	case TRACE_REG_REGISTER:
1105
		return enable_trace_kprobe(tk, file);
1106
	case TRACE_REG_UNREGISTER:
1107
		return disable_trace_kprobe(tk, file);
1108 1109 1110

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
1111
		return enable_trace_kprobe(tk, NULL);
1112
	case TRACE_REG_PERF_UNREGISTER:
1113
		return disable_trace_kprobe(tk, NULL);
1114 1115
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
1116 1117
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
1118
		return 0;
1119 1120 1121 1122
#endif
	}
	return 0;
}
1123 1124 1125 1126

static __kprobes
int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
1127
	struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
1128

1129
	tk->nhit++;
1130

1131 1132
	if (tk->tp.flags & TP_FLAG_TRACE)
		kprobe_trace_func(tk, regs);
1133
#ifdef CONFIG_PERF_EVENTS
1134 1135
	if (tk->tp.flags & TP_FLAG_PROFILE)
		kprobe_perf_func(tk, regs);
1136
#endif
1137 1138 1139 1140 1141 1142
	return 0;	/* We don't tweek kernel, so just return 0 */
}

static __kprobes
int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
1143
	struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);
1144

1145
	tk->nhit++;
1146

1147 1148
	if (tk->tp.flags & TP_FLAG_TRACE)
		kretprobe_trace_func(tk, ri, regs);
1149
#ifdef CONFIG_PERF_EVENTS
1150 1151
	if (tk->tp.flags & TP_FLAG_PROFILE)
		kretprobe_perf_func(tk, ri, regs);
1152
#endif
1153 1154
	return 0;	/* We don't tweek kernel, so just return 0 */
}
1155

1156 1157 1158 1159 1160 1161 1162 1163
/* Output callbacks for kretprobe events */
static struct trace_event_functions kretprobe_funcs = {
	.trace		= print_kretprobe_event
};

/* Output callbacks for kprobe events */
static struct trace_event_functions kprobe_funcs = {
	.trace		= print_kprobe_event
};

static int register_kprobe_event(struct trace_kprobe *tk)
1165
{
1166
	struct ftrace_event_call *call = &tk->tp.call;
1167 1168 1169
	int ret;

	/* Initialize ftrace_event_call */
1170
	INIT_LIST_HEAD(&call->class->fields);
1171
	if (trace_kprobe_is_return(tk)) {
1172
		call->event.funcs = &kretprobe_funcs;
1173
		call->class->define_fields = kretprobe_event_define_fields;
1174
	} else {
1175
		call->event.funcs = &kprobe_funcs;
1176
		call->class->define_fields = kprobe_event_define_fields;
1177
	}
1178
	if (set_print_fmt(tk) < 0)
1179
		return -ENOMEM;
1180 1181
	ret = register_ftrace_event(&call->event);
	if (!ret) {
1182
		kfree(call->print_fmt);
1183
		return -ENODEV;
1184
	}
1185
	call->flags = 0;
1186
	call->class->reg = kprobe_register;
1187
	call->data = tk;
1188
	ret = trace_add_event_call(call);
1189
	if (ret) {
1190
		pr_info("Failed to register kprobe event: %s\n", call->name);
1191
		kfree(call->print_fmt);
1192
		unregister_ftrace_event(&call->event);
1193
	}
1194 1195 1196
	return ret;
}

1197
static int unregister_kprobe_event(struct trace_kprobe *tk)
1198
{
1199 1200
	int ret;

1201
	/* tp->event is unregistered in trace_remove_event_call() */
1202
	ret = trace_remove_event_call(&tk->tp.call);
1203
	if (!ret)
1204
		kfree(tk->tp.call.print_fmt);
1205
	return ret;
1206 1207
}

L
Lucas De Marchi 已提交
1208
/* Make a debugfs interface for controlling probe points */
1209 1210 1211 1212 1213
static __init int init_kprobe_trace(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

1214
	if (register_module_notifier(&trace_kprobe_module_nb))
1215 1216
		return -EINVAL;

1217 1218 1219 1220 1221 1222 1223
	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
				    NULL, &kprobe_events_ops);

1224
	/* Event list interface */
1225 1226 1227
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_events' entry\n");
1228 1229 1230 1231 1232 1233 1234 1235

	/* Profile interface */
	entry = debugfs_create_file("kprobe_profile", 0444, d_tracer,
				    NULL, &kprobe_profile_ops);

	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_profile' entry\n");
1236 1237 1238 1239 1240 1241 1242
	return 0;
}
fs_initcall(init_kprobe_trace);


#ifdef CONFIG_FTRACE_STARTUP_TEST

/*
 * The "__used" keeps gcc from removing the function symbol
 * from the kallsyms table.
 */
static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
					       int a4, int a5, int a6)
{
	return a1 + a2 + a3 + a4 + a5 + a6;
}

1253
static struct ftrace_event_file *
1254
find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
1255 1256 1257 1258
{
	struct ftrace_event_file *file;

	list_for_each_entry(file, &tr->events, list)
1259
		if (file->event_call == &tk->tp.call)
1260 1261 1262 1263 1264
			return file;

	return NULL;
}

1265
/*
1266
 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
1267 1268
 * stage, we can do this lockless.
 */
1269 1270
static __init int kprobe_trace_self_tests_init(void)
{
1271
	int ret, warn = 0;
1272
	int (*target)(int, int, int, int, int, int);
1273
	struct trace_kprobe *tk;
1274
	struct ftrace_event_file *file;
1275 1276 1277 1278 1279

	target = kprobe_trace_selftest_target;

	pr_info("Testing kprobe tracing: ");

1280 1281
	ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target "
				  "$stack $stack0 +0($stack)",
1282
				  create_trace_kprobe);
1283
	if (WARN_ON_ONCE(ret)) {
1284
		pr_warn("error on probing function entry.\n");
1285 1286 1287
		warn++;
	} else {
		/* Enable trace point */
1288 1289
		tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tk == NULL)) {
1290
			pr_warn("error on getting new probe.\n");
1291
			warn++;
1292
		} else {
1293
			file = find_trace_probe_file(tk, top_trace_array());
1294 1295 1296 1297
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
1298
				enable_trace_kprobe(tk, file);
1299
		}
1300
	}
1301

1302
	ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
1303
				  "$retval", create_trace_kprobe);
1304
	if (WARN_ON_ONCE(ret)) {
1305
		pr_warn("error on probing function return.\n");
1306 1307 1308
		warn++;
	} else {
		/* Enable trace point */
1309 1310
		tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tk == NULL)) {
1311
			pr_warn("error on getting 2nd new probe.\n");
1312
			warn++;
1313
		} else {
1314
			file = find_trace_probe_file(tk, top_trace_array());
1315 1316 1317 1318
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
1319
				enable_trace_kprobe(tk, file);
1320
		}
1321 1322 1323 1324
	}

	if (warn)
		goto end;
1325 1326 1327

	ret = target(1, 2, 3, 4, 5, 6);

1328
	/* Disable trace points before removing it */
1329 1330
	tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tk == NULL)) {
1331
		pr_warn("error on getting test probe.\n");
1332
		warn++;
1333
	} else {
1334
		file = find_trace_probe_file(tk, top_trace_array());
1335 1336 1337 1338
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
1339
			disable_trace_kprobe(tk, file);
1340
	}
1341

1342 1343
	tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tk == NULL)) {
1344
		pr_warn("error on getting 2nd test probe.\n");
1345
		warn++;
1346
	} else {
1347
		file = find_trace_probe_file(tk, top_trace_array());
1348 1349 1350 1351
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
1352
			disable_trace_kprobe(tk, file);
1353
	}
1354

1355
	ret = traceprobe_command("-:testprobe", create_trace_kprobe);
1356
	if (WARN_ON_ONCE(ret)) {
1357
		pr_warn("error on deleting a probe.\n");
1358 1359 1360
		warn++;
	}

1361
	ret = traceprobe_command("-:testprobe2", create_trace_kprobe);
1362
	if (WARN_ON_ONCE(ret)) {
1363
		pr_warn("error on deleting a probe.\n");
1364 1365
		warn++;
	}
1366

1367
end:
1368
	release_all_trace_kprobes();
1369 1370 1371 1372
	if (warn)
		pr_cont("NG: Some tests are failed. Please check them.\n");
	else
		pr_cont("OK\n");
1373 1374 1375 1376 1377 1378
	return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif