trace_kprobe.c 34.6 KB
Newer Older
1
/*
2
 * Kprobes-based tracing events
3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/module.h>
#include <linux/uaccess.h>

23
#include "trace_probe.h"
24

25
#define KPROBE_EVENT_SYSTEM "kprobes"
26

27
/**
28
 * Kprobe event core functions
29
 */
30
/*
 * Per-event kprobe/kretprobe state.  One instance is created for each
 * "p:..." or "r:..." definition written to the kprobe_events file.
 */
struct trace_kprobe {
	struct list_head	list;	/* entry on the global probe_list */
	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
	unsigned long 		nhit;	/* hit counter shown in kprobe_profile */
	const char		*symbol;	/* symbol name */
	struct trace_probe	tp;	/* common probe data; has a flexible
					 * args[] tail, so must stay last */
};

38 39 40 41 42
struct event_file_link {
	struct ftrace_event_file	*file;
	struct list_head		list;
};

43 44
#define SIZEOF_TRACE_KPROBE(n)				\
	(offsetof(struct trace_kprobe, tp.args) +	\
45
	(sizeof(struct probe_arg) * (n)))
46

47

48
/* A probe is a kretprobe iff a return handler was installed at alloc time. */
static __kprobes bool trace_kprobe_is_return(struct trace_kprobe *tk)
{
	return !!tk->rp.handler;
}

53
static __kprobes const char *trace_kprobe_symbol(struct trace_kprobe *tk)
54
{
55
	return tk->symbol ? tk->symbol : "unknown";
56 57
}

58
static __kprobes unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
59
{
60
	return tk->rp.kp.offset;
61 62
}

63
/* True when the underlying kprobe's target has gone (e.g. module unloaded). */
static __kprobes bool trace_kprobe_has_gone(struct trace_kprobe *tk)
{
	return kprobe_gone(&tk->rp.kp) ? true : false;
}

68 69
/*
 * Does this probe target a symbol inside @mod?  Module probes are written
 * as "MOD:SYM", so match the prefix before ':' against mod->name.
 */
static __kprobes bool trace_kprobe_within_module(struct trace_kprobe *tk,
						 struct module *mod)
{
	const char *sym = trace_kprobe_symbol(tk);
	int len = strlen(mod->name);

	if (strncmp(mod->name, sym, len) != 0)
		return false;

	return sym[len] == ':';
}

76
/* True when the probe symbol carries a "MOD:" module prefix. */
static __kprobes bool trace_kprobe_is_on_module(struct trace_kprobe *tk)
{
	return strchr(trace_kprobe_symbol(tk), ':') != NULL;
}

81 82
/* Trace-event (un)registration, defined later in this file */
static int register_kprobe_event(struct trace_kprobe *tk);
static int unregister_kprobe_event(struct trace_kprobe *tk);

/* probe_lock protects probe_list and all probe (un)registration paths */
static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);

/* Low-level k*probe hit dispatchers, defined later in this file */
static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
				struct pt_regs *regs);

91 92 93
/*
 * Allocate new trace_probe and initialize it (including kprobes).
 *
 * @group/@event name the trace event (both validated with is_good_name());
 * exactly one of @symbol (plus @offs) or @addr identifies the probe point;
 * @nargs sizes the trailing args[] array; @is_return selects kretprobe.
 *
 * Returns the new object or an ERR_PTR (-ENOMEM/-EINVAL); never NULL.
 * The kprobe itself is NOT registered here - see __register_trace_kprobe().
 */
static struct trace_kprobe *alloc_trace_kprobe(const char *group,
					     const char *event,
					     void *addr,
					     const char *symbol,
					     unsigned long offs,
					     int nargs, bool is_return)
{
	struct trace_kprobe *tk;
	int ret = -ENOMEM;

	tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
	if (!tk)
		return ERR_PTR(ret);

	if (symbol) {
		/* Probe by symbol+offset: kprobes resolves the address later */
		tk->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tk->symbol)
			goto error;
		tk->rp.kp.symbol_name = tk->symbol;
		tk->rp.kp.offset = offs;
	} else
		tk->rp.kp.addr = addr;

	/* Only one of the two handlers is set; trace_kprobe_is_return()
	 * relies on rp.handler being non-NULL exactly for kretprobes. */
	if (is_return)
		tk->rp.handler = kretprobe_dispatcher;
	else
		tk->rp.kp.pre_handler = kprobe_dispatcher;

	if (!event || !is_good_name(event)) {
		ret = -EINVAL;
		goto error;
	}

	tk->tp.call.class = &tk->tp.class;
	tk->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tk->tp.call.name)
		goto error;

	if (!group || !is_good_name(group)) {
		ret = -EINVAL;
		goto error;
	}

	tk->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tk->tp.class.system)
		goto error;

	INIT_LIST_HEAD(&tk->list);
	INIT_LIST_HEAD(&tk->tp.files);
	return tk;
error:
	/* kfree(NULL) is a no-op, so partial initialization is safe here */
	kfree(tk->tp.call.name);
	kfree(tk->symbol);
	kfree(tk);
	return ERR_PTR(ret);
}

151
/*
 * Release a trace_kprobe and everything it owns (probe args, the group
 * and event name strings, the symbol copy).  Caller must have already
 * unregistered the probe/event.
 */
static void free_trace_kprobe(struct trace_kprobe *tk)
{
	int i;

	for (i = 0; i < tk->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tk->tp.args[i]);

	kfree(tk->tp.call.class->system);
	kfree(tk->tp.call.name);
	kfree(tk->symbol);
	kfree(tk);
}

164 165
static struct trace_kprobe *find_trace_kprobe(const char *event,
					      const char *group)
166
{
167
	struct trace_kprobe *tk;
168

169 170 171 172
	list_for_each_entry(tk, &probe_list, list)
		if (strcmp(tk->tp.call.name, event) == 0 &&
		    strcmp(tk->tp.call.class->system, group) == 0)
			return tk;
173 174 175
	return NULL;
}

176 177 178 179 180
/*
 * Enable trace_probe
 * if the file is NULL, enable "perf" handler, or enable "trace" handler.
 *
 * For the trace case a new event_file_link is appended to tp.files (RCU
 * list, read by the trace handlers).  The underlying k*probe is armed
 * only if it is registered and its target has not gone away.
 */
static int
enable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file)
{
	int ret = 0;

	if (file) {
		struct event_file_link *link;

		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			ret = -ENOMEM;
			goto out;
		}

		link->file = file;
		list_add_tail_rcu(&link->list, &tk->tp.files);

		tk->tp.flags |= TP_FLAG_TRACE;
	} else
		tk->tp.flags |= TP_FLAG_PROFILE;

	if (trace_probe_is_registered(&tk->tp) && !trace_kprobe_has_gone(tk)) {
		if (trace_kprobe_is_return(tk))
			ret = enable_kretprobe(&tk->rp);
		else
			ret = enable_kprobe(&tk->rp.kp);
	}
 out:
	return ret;
}

211 212
static struct event_file_link *
find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file)
213
{
214
	struct event_file_link *link;
215

216 217 218
	list_for_each_entry(link, &tp->files, list)
		if (link->file == file)
			return link;
219

220
	return NULL;
221 222 223 224 225 226 227
}

/*
 * Disable trace_probe
 * if the file is NULL, disable "perf" handler, or disable "trace" handler.
 *
 * Removal from tp.files uses list_del_rcu(), so the link may still be
 * observed by in-flight handlers; the synchronize_sched() below makes
 * sure all of them have finished before the link is freed.
 */
static int
disable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file)
{
	struct event_file_link *link = NULL;
	int wait = 0;
	int ret = 0;

	if (file) {
		link = find_event_file_link(&tk->tp, file);
		if (!link) {
			ret = -EINVAL;
			goto out;
		}

		list_del_rcu(&link->list);
		wait = 1;
		/* Other files still trace this probe: keep it armed */
		if (!list_empty(&tk->tp.files))
			goto out;

		tk->tp.flags &= ~TP_FLAG_TRACE;
	} else
		tk->tp.flags &= ~TP_FLAG_PROFILE;

	if (!trace_probe_is_enabled(&tk->tp) && trace_probe_is_registered(&tk->tp)) {
		if (trace_kprobe_is_return(tk))
			disable_kretprobe(&tk->rp);
		else
			disable_kprobe(&tk->rp.kp);
		wait = 1;
	}
 out:
	if (wait) {
		/*
		 * Synchronize with kprobe_trace_func/kretprobe_trace_func
		 * to ensure disabled (all running handlers are finished).
		 * This is not only for kfree(), but also the caller,
		 * trace_remove_event_call() supposes it for releasing
		 * event_call related objects, which will be accessed in
		 * the kprobe_trace_func/kretprobe_trace_func.
		 */
		synchronize_sched();
		kfree(link);	/* Ignored if link == NULL */
	}

	return ret;
}

274
/* Internal register function - just handle k*probes and flags */
275
static int __register_trace_kprobe(struct trace_kprobe *tk)
276
{
277
	int i, ret;
278

279
	if (trace_probe_is_registered(&tk->tp))
280 281
		return -EINVAL;

282 283
	for (i = 0; i < tk->tp.nr_args; i++)
		traceprobe_update_arg(&tk->tp.args[i]);
284

285
	/* Set/clear disabled flag according to tp->flag */
286 287
	if (trace_probe_is_enabled(&tk->tp))
		tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
288
	else
289
		tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;
290

291 292
	if (trace_kprobe_is_return(tk))
		ret = register_kretprobe(&tk->rp);
293
	else
294
		ret = register_kprobe(&tk->rp.kp);
295 296

	if (ret == 0)
297
		tk->tp.flags |= TP_FLAG_REGISTERED;
298 299
	else {
		pr_warning("Could not insert probe at %s+%lu: %d\n",
300 301
			   trace_kprobe_symbol(tk), trace_kprobe_offset(tk), ret);
		if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) {
302 303 304 305 306 307
			pr_warning("This probe might be able to register after"
				   "target module is loaded. Continue.\n");
			ret = 0;
		} else if (ret == -EILSEQ) {
			pr_warning("Probing address(0x%p) is not an "
				   "instruction boundary.\n",
308
				   tk->rp.kp.addr);
309 310 311 312 313 314 315 316
			ret = -EINVAL;
		}
	}

	return ret;
}

/* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_kprobe(struct trace_kprobe *tk)
{
	if (trace_probe_is_registered(&tk->tp)) {
		if (trace_kprobe_is_return(tk))
			unregister_kretprobe(&tk->rp);
		else
			unregister_kprobe(&tk->rp.kp);
		tk->tp.flags &= ~TP_FLAG_REGISTERED;
		/* Cleanup kprobe for reuse */
		if (tk->rp.kp.symbol_name)
			/* Drop the stale resolved address so a later
			 * re-register resolves the symbol afresh */
			tk->rp.kp.addr = NULL;
	}
}

/* Unregister a trace_probe and probe_event: call with locking probe_lock */
static int unregister_trace_kprobe(struct trace_kprobe *tk)
{
	/* Enabled event can not be unregistered */
	if (trace_probe_is_enabled(&tk->tp))
		return -EBUSY;

	/* Will fail if probe is being used by ftrace or perf */
	if (unregister_kprobe_event(tk))
		return -EBUSY;

	__unregister_trace_kprobe(tk);
	list_del(&tk->list);

	return 0;
}

/*
 * Register a trace_probe and probe_event.
 *
 * Takes probe_lock.  An existing probe with the same group/event name is
 * replaced (deleted first); on success @tk is linked onto probe_list.
 */
static int register_trace_kprobe(struct trace_kprobe *tk)
{
	struct trace_kprobe *old_tk;
	int ret;

	mutex_lock(&probe_lock);

	/* Delete old (same name) event if exist */
	old_tk = find_trace_kprobe(tk->tp.call.name, tk->tp.call.class->system);
	if (old_tk) {
		ret = unregister_trace_kprobe(old_tk);
		if (ret < 0)
			goto end;
		free_trace_kprobe(old_tk);
	}

	/* Register new event */
	ret = register_kprobe_event(tk);
	if (ret) {
		pr_warning("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	/* Register k*probe */
	ret = __register_trace_kprobe(tk);
	if (ret < 0)
		/* Roll back the event registration on probe failure */
		unregister_kprobe_event(tk);
	else
		list_add_tail(&tk->list, &probe_list);

end:
	mutex_unlock(&probe_lock);
	return ret;
}

384
/* Module notifier call back, checking event on the module */
385
static int trace_kprobe_module_callback(struct notifier_block *nb,
386 387 388
				       unsigned long val, void *data)
{
	struct module *mod = data;
389
	struct trace_kprobe *tk;
390 391 392 393 394 395 396
	int ret;

	if (val != MODULE_STATE_COMING)
		return NOTIFY_DONE;

	/* Update probes on coming module */
	mutex_lock(&probe_lock);
397 398
	list_for_each_entry(tk, &probe_list, list) {
		if (trace_kprobe_within_module(tk, mod)) {
399
			/* Don't need to check busy - this should have gone. */
400 401
			__unregister_trace_kprobe(tk);
			ret = __register_trace_kprobe(tk);
402 403 404
			if (ret)
				pr_warning("Failed to re-register probe %s on"
					   "%s: %d\n",
405
					   tk->tp.call.name, mod->name, ret);
406 407 408 409 410 411 412
		}
	}
	mutex_unlock(&probe_lock);

	return NOTIFY_DONE;
}

413 414
/* Module state notifier; re-registers module probes on module load */
static struct notifier_block trace_kprobe_module_nb = {
	.notifier_call = trace_kprobe_module_callback,
	.priority = 1	/* Invoked after kprobe module callback */
};

418
/*
 * Parse one command line written to kprobe_events and create (or, for
 * "-:", delete) the corresponding probe.  @argv is the tokenized line.
 */
static int create_trace_kprobe(int argc, char **argv)
{
	/*
	 * Argument syntax:
	 *  - Add kprobe: p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe: r[:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
	 * Fetch args:
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth of stack (N:0-)
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG	: fetch register REG
	 * Dereferencing memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 * Type of args:
	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
	 */
	struct trace_kprobe *tk;
	int i, ret = 0;
	bool is_return = false, is_delete = false;
	char *symbol = NULL, *event = NULL, *group = NULL;
	char *arg;
	unsigned long offset = 0;
	void *addr = NULL;
	char buf[MAX_EVENT_NAME_LEN];

	/* argc must be >= 1 */
	if (argv[0][0] == 'p')
		is_return = false;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] == '-')
		is_delete = true;
	else {
		pr_info("Probe definition must be started with 'p', 'r' or"
			" '-'.\n");
		return -EINVAL;
	}

	/* Split the optional "[GRP/]EVENT" part after "p:"/"r:"/"-:" */
	if (argv[0][1] == ':') {
		event = &argv[0][2];
		if (strchr(event, '/')) {
			group = event;
			event = strchr(group, '/') + 1;
			event[-1] = '\0';	/* terminate the group name */
			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = KPROBE_EVENT_SYSTEM;

	if (is_delete) {
		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&probe_lock);
		tk = find_trace_kprobe(event, group);
		if (!tk) {
			mutex_unlock(&probe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_kprobe(tk);
		if (ret == 0)
			free_trace_kprobe(tk);
		mutex_unlock(&probe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	if (isdigit(argv[1][0])) {
		if (is_return) {
			pr_info("Return probe point must be a symbol.\n");
			return -EINVAL;
		}
		/* an address specified */
		ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
		if (ret) {
			pr_info("Failed to parse address.\n");
			return ret;
		}
	} else {
		/* a symbol specified */
		symbol = argv[1];
		/* TODO: support .init module functions */
		ret = traceprobe_split_symbol_offset(symbol, &offset);
		if (ret) {
			pr_info("Failed to parse symbol.\n");
			return ret;
		}
		if (offset && is_return) {
			pr_info("Return probe must be used without offset.\n");
			return -EINVAL;
		}
	}
	/* Remaining argv[] entries are the fetch arguments */
	argc -= 2; argv += 2;

	/* setup a probe */
	if (!event) {
		/* Make a new event name */
		if (symbol)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
				 is_return ? 'r' : 'p', symbol, offset);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
				 is_return ? 'r' : 'p', addr);
		event = buf;
	}
	tk = alloc_trace_kprobe(group, event, addr, symbol, offset, argc,
			       is_return);
	if (IS_ERR(tk)) {
		pr_info("Failed to allocate trace_probe.(%d)\n",
			(int)PTR_ERR(tk));
		return PTR_ERR(tk);
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		/* Increment count for freeing args in error case */
		tk->tp.nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			parg->name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			parg->name = kstrdup(buf, GFP_KERNEL);
		}

		if (!parg->name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(parg->name)) {
			pr_info("Invalid argument[%d] name: %s\n",
				i, parg->name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(parg->name,
							tk->tp.args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg,
						is_return, true);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_kprobe(tk);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_kprobe(tk);
	return ret;
}

609
/*
 * Remove every probe (used when kprobe_events is opened with O_TRUNC).
 * Fails with -EBUSY if any probe is still enabled.
 */
static int release_all_trace_kprobes(void)
{
	struct trace_kprobe *tk;
	int ret = 0;

	mutex_lock(&probe_lock);
	/* Ensure no probe is in use. */
	list_for_each_entry(tk, &probe_list, list)
		if (trace_probe_is_enabled(&tk->tp)) {
			ret = -EBUSY;
			goto end;
		}
	/* TODO: Use batch unregistration */
	while (!list_empty(&probe_list)) {
		tk = list_entry(probe_list.next, struct trace_kprobe, list);
		ret = unregister_trace_kprobe(tk);
		if (ret)
			goto end;
		free_trace_kprobe(tk);
	}

end:
	mutex_unlock(&probe_lock);

	return ret;
}

/* Probes listing interfaces */
/* seq_file start: holds probe_lock for the duration of the iteration */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&probe_lock);
	return seq_list_start(&probe_list, *pos);
}

/* seq_file next: advance to the following probe on probe_list */
static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &probe_list, pos);
}

/* seq_file stop: drop the lock taken in probes_seq_start() */
static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&probe_lock);
}

/*
 * Print one probe definition in the same syntax accepted by
 * create_trace_kprobe(): "p|r:GRP/EVENT SYM[+offs]|ADDR [NAME=ARG...]".
 */
static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_kprobe *tk = v;
	int i;

	seq_printf(m, "%c", trace_kprobe_is_return(tk) ? 'r' : 'p');
	seq_printf(m, ":%s/%s", tk->tp.call.class->system, tk->tp.call.name);

	if (!tk->symbol)
		seq_printf(m, " 0x%p", tk->rp.kp.addr);
	else if (tk->rp.kp.offset)
		seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
			   tk->rp.kp.offset);
	else
		seq_printf(m, " %s", trace_kprobe_symbol(tk));

	/* args[i].comm keeps the original fetch-arg source text */
	for (i = 0; i < tk->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
	seq_printf(m, "\n");

	return 0;
}

/* seq_file operations behind the kprobe_events file */
static const struct seq_operations probes_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_seq_show
};

/*
 * open() for kprobe_events.  Opening for write with O_TRUNC clears all
 * probes first (fails if any probe is still enabled).
 */
static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = release_all_trace_kprobes();
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

/* write() for kprobe_events: each line is parsed by create_trace_kprobe() */
static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return traceprobe_probes_write(file, buffer, count, ppos,
			create_trace_kprobe);
}

/* file_operations for the debugfs kprobe_events control file */
static const struct file_operations kprobe_events_ops = {
	.owner          = THIS_MODULE,
	.open           = probes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
	.write		= probes_write,
};

712 713 714
/* Probes profiling interfaces */
/* One line per probe: event name, hit count, missed count */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_kprobe *tk = v;

	seq_printf(m, "  %-44s %15lu %15lu\n", tk->tp.call.name, tk->nhit,
		   tk->rp.kp.nmissed);

	return 0;
}

/* seq_file operations for kprobe_profile; reuses the listing iterator */
static const struct seq_operations profile_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_profile_seq_show
};

/* open() for the read-only kprobe_profile file */
static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

/* file_operations for the debugfs kprobe_profile statistics file */
static const struct file_operations kprobe_profile_ops = {
	.owner          = THIS_MODULE,
	.open           = profile_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790
/* Sum up total data length for dynamic arraies (strings) */
static __kprobes int __get_data_size(struct trace_probe *tp,
				     struct pt_regs *regs)
{
	int i, ret = 0;
	u32 len;

	/* Only args with a fetch_size.fn carry variable-length data */
	for (i = 0; i < tp->nr_args; i++)
		if (unlikely(tp->args[i].fetch_size.fn)) {
			call_fetch(&tp->args[i].fetch_size, regs, &len);
			ret += len;
		}

	return ret;
}

/* Store the value of each argument */
static __kprobes void store_trace_args(int ent_size, struct trace_probe *tp,
				       struct pt_regs *regs,
				       u8 *data, int maxlen)
{
	int i;
	u32 end = tp->size;	/* dynamic data starts after the fixed args */
	u32 *dl;	/* Data (relative) location */

	for (i = 0; i < tp->nr_args; i++) {
		if (unlikely(tp->args[i].fetch_size.fn)) {
			/*
			 * First, we set the relative location and
			 * maximum data length to *dl
			 */
			dl = (u32 *)(data + tp->args[i].offset);
			*dl = make_data_rloc(maxlen, end - tp->args[i].offset);
			/* Then try to fetch string or dynamic array data */
			call_fetch(&tp->args[i].fetch, regs, dl);
			/* Reduce maximum length */
			end += get_rloc_len(*dl);
			maxlen -= get_rloc_len(*dl);
			/* Trick here, convert data_rloc to data_loc */
			*dl = convert_rloc_to_loc(*dl,
				 ent_size + tp->args[i].offset);
		} else
			/* Just fetching data normally */
			call_fetch(&tp->args[i].fetch, regs,
				   data + tp->args[i].offset);
	}
}

791
/* Kprobe handler */
/*
 * Record one kprobe hit into @ftrace_file's ring buffer.  Runs in probe
 * context, so it only saves flags/preempt-count and reserves/commits a
 * ring buffer event - no sleeping.
 */
static __kprobes void
__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
		    struct ftrace_event_file *ftrace_file)
{
	struct kprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, dsize, pc;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tk->tp.call;

	WARN_ON(call != ftrace_file->event_call);

	/* Soft-disabled files take the SOFT_MODE path and record nothing */
	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	/* Fixed-size args plus variable-length (string) data */
	dsize = __get_data_size(&tk->tp, regs);
	size = sizeof(*entry) + tk->tp.size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->ip = (unsigned long)tk->rp.kp.addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);

	if (!filter_check_discard(ftrace_file, entry, buffer, event))
		trace_buffer_unlock_commit_regs(buffer, event,
						irq_flags, pc, regs);
}

829
/*
 * Fan a kprobe hit out to every trace instance that enabled this event.
 * RCU list walk pairs with list_*_rcu in enable/disable_trace_kprobe().
 */
static __kprobes void
kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct event_file_link *link;

	list_for_each_entry_rcu(link, &tk->tp.files, list)
		__kprobe_trace_func(tk, regs, link->file);
}

838
/* Kretprobe handler */
/*
 * Record one function-return hit (entry address + return address) into
 * @ftrace_file's ring buffer.  Mirrors __kprobe_trace_func().
 */
static __kprobes void
__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		       struct pt_regs *regs,
		       struct ftrace_event_file *ftrace_file)
{
	struct kretprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, pc, dsize;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tk->tp.call;

	WARN_ON(call != ftrace_file->event_call);

	/* Soft-disabled files record nothing */
	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(&tk->tp, regs);
	size = sizeof(*entry) + tk->tp.size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);

	if (!filter_check_discard(ftrace_file, entry, buffer, event))
		trace_buffer_unlock_commit_regs(buffer, event,
						irq_flags, pc, regs);
}

878
/* Fan a kretprobe hit out to every trace instance enabling this event */
static __kprobes void
kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		     struct pt_regs *regs)
{
	struct event_file_link *link;

	list_for_each_entry_rcu(link, &tk->tp.files, list)
		__kretprobe_trace_func(tk, ri, regs, link->file);
}

888
/* Event entry printers */
/*
 * Format a recorded kprobe event as "EVENT: (IP) arg1=... arg2=...".
 * Returns PARTIAL_LINE when the seq buffer fills mid-line.
 */
static enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct kprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	/* Argument payload follows the fixed-size entry header */
	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

925
/*
 * Format a recorded kretprobe event as "EVENT: (RET_IP <- FUNC) args...".
 * Returns PARTIAL_LINE when the seq buffer fills mid-line.
 */
static enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags,
		      struct trace_event *event)
{
	struct kretprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kretprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, " <- "))
		goto partial;

	/* Print the probed function without its offset */
	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	/* Argument payload follows the fixed-size entry header */
	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}


/*
 * Define the ftrace event fields for a kprobe event: the fixed IP field
 * plus one field per fetch argument.  Returns 0 or trace_define_field()'s
 * error.
 */
static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kprobe_trace_entry_head field;
	struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tk->tp.nr_args; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name,
					 sizeof(field) + parg->offset,
					 parg->type->size,
					 parg->type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * Define the ftrace event fields for a kretprobe event: func and ret_ip
 * plus one field per fetch argument.
 */
static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kretprobe_trace_entry_head field;
	struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tk->tp.nr_args; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name,
					 sizeof(field) + parg->offset,
					 parg->type->size,
					 parg->type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

1015
/*
 * Build the event's print_fmt string into @buf (at most @len bytes).
 * Called with len=0 first to compute the required size (snprintf-style);
 * returns the length that would be written.
 */
static int __set_print_fmt(struct trace_kprobe *tk, char *buf, int len)
{
	int i;
	int pos = 0;

	const char *fmt, *arg;

	if (!trace_kprobe_is_return(tk)) {
		fmt = "(%lx)";
		arg = "REC->" FIELD_STRING_IP;
	} else {
		fmt = "(%lx <- %lx)";
		arg = "REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP;
	}

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt);

	for (i = 0; i < tk->tp.nr_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%s",
				tk->tp.args[i].name, tk->tp.args[i].type->fmt);
	}

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg);

	for (i = 0; i < tk->tp.nr_args; i++) {
		/* string args are fetched via __get_str(), others via REC-> */
		if (strcmp(tk->tp.args[i].type->name, "string") == 0)
			pos += snprintf(buf + pos, LEN_OR_ZERO,
					", __get_str(%s)",
					tk->tp.args[i].name);
		else
			pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s",
					tk->tp.args[i].name);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}

1058
/*
 * Allocate and install tk->tp.call.print_fmt using the two-pass
 * __set_print_fmt() measure-then-write idiom.  Returns 0 or -ENOMEM.
 */
static int set_print_fmt(struct trace_kprobe *tk)
{
	int len;
	char *print_fmt;

	/* First: called with 0 length to calculate the needed length */
	len = __set_print_fmt(tk, NULL, 0);
	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_print_fmt(tk, print_fmt, len + 1);
	tk->tp.call.print_fmt = print_fmt;

	return 0;
}

1076
#ifdef CONFIG_PERF_EVENTS
1077 1078

/* Kprobe profile handler */
/*
 * Record one kprobe hit into the perf buffer.  Bails out early when no
 * perf event is attached on this CPU.
 */
static __kprobes void
kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct ftrace_event_call *call = &tk->tp.call;
	struct kprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
	/* perf requires u64 alignment, minus the u32 size header */
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
	if (!entry)
		return;

	entry->ip = (unsigned long)tk->rp.kp.addr;
	/* Zero the padding introduced by the u64 alignment above */
	memset(&entry[1], 0, dsize);
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
	perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
}

/* Kretprobe profile handler */
/*
 * Record one function-return hit into the perf buffer; mirrors
 * kprobe_perf_func() with the kretprobe entry layout (func + ret_ip).
 */
static __kprobes void
kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		    struct pt_regs *regs)
{
	struct ftrace_event_call *call = &tk->tp.call;
	struct kretprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
	/* perf requires u64 alignment, minus the u32 size header */
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
	if (!entry)
		return;

	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
	perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
}
1136
#endif	/* CONFIG_PERF_EVENTS */
1137

1138 1139 1140 1141 1142 1143
/*
 * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
 *
 * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
 * lockless, but we can't race with this __init function.
 */
1144
static __kprobes
1145 1146
int kprobe_register(struct ftrace_event_call *event,
		    enum trace_reg type, void *data)
1147
{
1148
	struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
1149
	struct ftrace_event_file *file = data;
1150

1151 1152
	switch (type) {
	case TRACE_REG_REGISTER:
1153
		return enable_trace_kprobe(tk, file);
1154
	case TRACE_REG_UNREGISTER:
1155
		return disable_trace_kprobe(tk, file);
1156 1157 1158

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
1159
		return enable_trace_kprobe(tk, NULL);
1160
	case TRACE_REG_PERF_UNREGISTER:
1161
		return disable_trace_kprobe(tk, NULL);
1162 1163
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
1164 1165
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
1166
		return 0;
1167 1168 1169 1170
#endif
	}
	return 0;
}
1171 1172 1173 1174

static __kprobes
int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
1175
	struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
1176

1177
	tk->nhit++;
1178

1179 1180
	if (tk->tp.flags & TP_FLAG_TRACE)
		kprobe_trace_func(tk, regs);
1181
#ifdef CONFIG_PERF_EVENTS
1182 1183
	if (tk->tp.flags & TP_FLAG_PROFILE)
		kprobe_perf_func(tk, regs);
1184
#endif
1185 1186 1187 1188 1189 1190
	return 0;	/* We don't tweek kernel, so just return 0 */
}

static __kprobes
int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
1191
	struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);
1192

1193
	tk->nhit++;
1194

1195 1196
	if (tk->tp.flags & TP_FLAG_TRACE)
		kretprobe_trace_func(tk, ri, regs);
1197
#ifdef CONFIG_PERF_EVENTS
1198 1199
	if (tk->tp.flags & TP_FLAG_PROFILE)
		kretprobe_perf_func(tk, ri, regs);
1200
#endif
1201 1202
	return 0;	/* We don't tweek kernel, so just return 0 */
}
1203

1204 1205 1206 1207 1208 1209 1210 1211
/* Output callbacks used by register_kprobe_event() for return probes */
static struct trace_event_functions kretprobe_funcs = {
	.trace		= print_kretprobe_event
};

/* Output callbacks used by register_kprobe_event() for entry probes */
static struct trace_event_functions kprobe_funcs = {
	.trace		= print_kprobe_event
};

1212
static int register_kprobe_event(struct trace_kprobe *tk)
1213
{
1214
	struct ftrace_event_call *call = &tk->tp.call;
1215 1216 1217
	int ret;

	/* Initialize ftrace_event_call */
1218
	INIT_LIST_HEAD(&call->class->fields);
1219
	if (trace_kprobe_is_return(tk)) {
1220
		call->event.funcs = &kretprobe_funcs;
1221
		call->class->define_fields = kretprobe_event_define_fields;
1222
	} else {
1223
		call->event.funcs = &kprobe_funcs;
1224
		call->class->define_fields = kprobe_event_define_fields;
1225
	}
1226
	if (set_print_fmt(tk) < 0)
1227
		return -ENOMEM;
1228 1229
	ret = register_ftrace_event(&call->event);
	if (!ret) {
1230
		kfree(call->print_fmt);
1231
		return -ENODEV;
1232
	}
1233
	call->flags = 0;
1234
	call->class->reg = kprobe_register;
1235
	call->data = tk;
1236
	ret = trace_add_event_call(call);
1237
	if (ret) {
1238
		pr_info("Failed to register kprobe event: %s\n", call->name);
1239
		kfree(call->print_fmt);
1240
		unregister_ftrace_event(&call->event);
1241
	}
1242 1243 1244
	return ret;
}

1245
static int unregister_kprobe_event(struct trace_kprobe *tk)
1246
{
1247 1248
	int ret;

1249
	/* tp->event is unregistered in trace_remove_event_call() */
1250
	ret = trace_remove_event_call(&tk->tp.call);
1251
	if (!ret)
1252
		kfree(tk->tp.call.print_fmt);
1253
	return ret;
1254 1255
}

L
Lucas De Marchi 已提交
1256
/* Make a debugfs interface for controlling probe points */
1257 1258 1259 1260 1261
static __init int init_kprobe_trace(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

1262
	if (register_module_notifier(&trace_kprobe_module_nb))
1263 1264
		return -EINVAL;

1265 1266 1267 1268 1269 1270 1271
	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
				    NULL, &kprobe_events_ops);

1272
	/* Event list interface */
1273 1274 1275
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_events' entry\n");
1276 1277 1278 1279 1280 1281 1282 1283

	/* Profile interface */
	entry = debugfs_create_file("kprobe_profile", 0444, d_tracer,
				    NULL, &kprobe_profile_ops);

	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_profile' entry\n");
1284 1285 1286 1287 1288 1289 1290
	return 0;
}
fs_initcall(init_kprobe_trace);


#ifdef CONFIG_FTRACE_STARTUP_TEST

1291 1292 1293 1294 1295 1296
/*
 * The "__used" keeps gcc from removing the function symbol
 * from the kallsyms table.
 */
static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
					       int a4, int a5, int a6)
{
	return a1 + a2 + a3 + a4 + a5 + a6;
}

1301
static struct ftrace_event_file *
1302
find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
1303 1304 1305 1306
{
	struct ftrace_event_file *file;

	list_for_each_entry(file, &tr->events, list)
1307
		if (file->event_call == &tk->tp.call)
1308 1309 1310 1311 1312
			return file;

	return NULL;
}

1313
/*
1314
 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
1315 1316
 * stage, we can do this lockless.
 */
1317 1318
static __init int kprobe_trace_self_tests_init(void)
{
1319
	int ret, warn = 0;
1320
	int (*target)(int, int, int, int, int, int);
1321
	struct trace_kprobe *tk;
1322
	struct ftrace_event_file *file;
1323 1324 1325 1326 1327

	target = kprobe_trace_selftest_target;

	pr_info("Testing kprobe tracing: ");

1328 1329
	ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target "
				  "$stack $stack0 +0($stack)",
1330
				  create_trace_kprobe);
1331
	if (WARN_ON_ONCE(ret)) {
1332
		pr_warn("error on probing function entry.\n");
1333 1334 1335
		warn++;
	} else {
		/* Enable trace point */
1336 1337
		tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tk == NULL)) {
1338
			pr_warn("error on getting new probe.\n");
1339
			warn++;
1340
		} else {
1341
			file = find_trace_probe_file(tk, top_trace_array());
1342 1343 1344 1345
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
1346
				enable_trace_kprobe(tk, file);
1347
		}
1348
	}
1349

1350
	ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
1351
				  "$retval", create_trace_kprobe);
1352
	if (WARN_ON_ONCE(ret)) {
1353
		pr_warn("error on probing function return.\n");
1354 1355 1356
		warn++;
	} else {
		/* Enable trace point */
1357 1358
		tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tk == NULL)) {
1359
			pr_warn("error on getting 2nd new probe.\n");
1360
			warn++;
1361
		} else {
1362
			file = find_trace_probe_file(tk, top_trace_array());
1363 1364 1365 1366
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
1367
				enable_trace_kprobe(tk, file);
1368
		}
1369 1370 1371 1372
	}

	if (warn)
		goto end;
1373 1374 1375

	ret = target(1, 2, 3, 4, 5, 6);

1376
	/* Disable trace points before removing it */
1377 1378
	tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tk == NULL)) {
1379
		pr_warn("error on getting test probe.\n");
1380
		warn++;
1381
	} else {
1382
		file = find_trace_probe_file(tk, top_trace_array());
1383 1384 1385 1386
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
1387
			disable_trace_kprobe(tk, file);
1388
	}
1389

1390 1391
	tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tk == NULL)) {
1392
		pr_warn("error on getting 2nd test probe.\n");
1393
		warn++;
1394
	} else {
1395
		file = find_trace_probe_file(tk, top_trace_array());
1396 1397 1398 1399
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
1400
			disable_trace_kprobe(tk, file);
1401
	}
1402

1403
	ret = traceprobe_command("-:testprobe", create_trace_kprobe);
1404
	if (WARN_ON_ONCE(ret)) {
1405
		pr_warn("error on deleting a probe.\n");
1406 1407 1408
		warn++;
	}

1409
	ret = traceprobe_command("-:testprobe2", create_trace_kprobe);
1410
	if (WARN_ON_ONCE(ret)) {
1411
		pr_warn("error on deleting a probe.\n");
1412 1413
		warn++;
	}
1414

1415
end:
1416
	release_all_trace_kprobes();
1417 1418 1419 1420
	if (warn)
		pr_cont("NG: Some tests are failed. Please check them.\n");
	else
		pr_cont("OK\n");
1421 1422 1423 1424 1425 1426
	return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif