/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/module.h>
#include <linux/uaccess.h>

#include "trace_probe.h"

#define KPROBE_EVENT_SYSTEM "kprobes"

/**
 * Kprobe event core functions
 */
/*
 * Per-event state for one kprobe/kretprobe tracing event: global list
 * linkage, the underlying k*probe object, a hit counter, the probed
 * symbol name (if symbol-based) and the generic trace_probe data.
 */
struct trace_kprobe {
	struct list_head	list;	/* linked on probe_list under probe_lock */
	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
	unsigned long 		nhit;	/* number of times the probe fired */
	const char		*symbol;	/* symbol name */
	struct trace_probe	tp;	/* must stay last: args[] is flexible */
};

/* Total allocation size of a trace_kprobe carrying @n probe arguments */
#define SIZEOF_TRACE_KPROBE(n)				\
	(offsetof(struct trace_kprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))
41

42

43
/* A kretprobe-based event is identified by an installed return handler. */
static __kprobes bool trace_kprobe_is_return(struct trace_kprobe *tk)
{
	return !!tk->rp.handler;
}

48
static __kprobes const char *trace_kprobe_symbol(struct trace_kprobe *tk)
49
{
50
	return tk->symbol ? tk->symbol : "unknown";
51 52
}

53
/* Offset of the probe point from the probed symbol. */
static __kprobes unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
{
	return tk->rp.kp.offset;
}

58
/* True when the underlying kprobe's target code has gone away. */
static __kprobes bool trace_kprobe_has_gone(struct trace_kprobe *tk)
{
	return kprobe_gone(&tk->rp.kp) != 0;
}

63 64
/*
 * True when the probe targets a symbol inside @mod, i.e. when its
 * symbol string begins with "<modname>:".
 */
static __kprobes bool trace_kprobe_within_module(struct trace_kprobe *tk,
						 struct module *mod)
{
	const char *sym = trace_kprobe_symbol(tk);
	int n = strlen(mod->name);

	if (strncmp(sym, mod->name, n) != 0)
		return false;
	return sym[n] == ':';
}

71
/* Module-scoped probes carry a "mod:symbol" style symbol string. */
static __kprobes bool trace_kprobe_is_on_module(struct trace_kprobe *tk)
{
	return strchr(trace_kprobe_symbol(tk), ':') != NULL;
}

static int register_kprobe_event(struct trace_kprobe *tk);
static int unregister_kprobe_event(struct trace_kprobe *tk);

static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
				struct pt_regs *regs);
/* Memory fetching by symbol */
struct symbol_cache {
	char		*symbol;	/* duplicated symbol name */
	long		offset;		/* offset added to the resolved address */
	unsigned long	addr;		/* cached address (+offset); 0 if unresolved */
};

/* Re-resolve the cached symbol address; returns 0 when unresolvable. */
unsigned long update_symbol_cache(struct symbol_cache *sc)
{
	unsigned long addr = (unsigned long)kallsyms_lookup_name(sc->symbol);

	if (addr)
		addr += sc->offset;
	sc->addr = addr;

	return addr;
}

/* Release a symbol_cache and the symbol name it duplicated. */
void free_symbol_cache(struct symbol_cache *sc)
{
	kfree(sc->symbol);
	kfree(sc);
}

/*
 * Allocate a symbol_cache for @sym(+@offset) and resolve its address.
 * Returns NULL for a NULL/empty name or on allocation failure.
 */
struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
{
	struct symbol_cache *sc;

	if (!sym || *sym == '\0')
		return NULL;

	sc = kzalloc(sizeof(*sc), GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->symbol = kstrdup(sym, GFP_KERNEL);
	if (!sc->symbol) {
		kfree(sc);
		return NULL;
	}

	sc->offset = offset;
	update_symbol_cache(sc);

	return sc;
}

131 132 133 134 135 136 137 138 139 140 141 142 143 144 145
/*
 * Kprobes-specific fetch functions
 */
/* Fetch the Nth entry of the kernel stack as @type; N comes in @offset. */
#define DEFINE_FETCH_stack(type)					\
static __kprobes void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,\
					  void *offset, void *dest)	\
{									\
	*(type *)dest = (type)regs_get_kernel_stack_nth(regs,		\
				(unsigned int)((unsigned long)offset));	\
}
DEFINE_BASIC_FETCH_FUNCS(stack)
/* No string on the stack entry */
#define fetch_stack_string	NULL
#define fetch_stack_string_size	NULL

146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222
/* Fetch a @type value from kernel memory at @addr; stores 0 on fault. */
#define DEFINE_FETCH_memory(type)					\
static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\
					  void *addr, void *dest)	\
{									\
	type retval;							\
	if (probe_kernel_address(addr, retval))				\
		*(type *)dest = 0;					\
	else								\
		*(type *)dest = retval;					\
}
DEFINE_BASIC_FETCH_FUNCS(memory)
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
						      void *addr, void *dest)
{
	long ret;
	int maxlen = get_rloc_len(*(u32 *)dest);
	u8 *dst = get_rloc_data(dest);
	u8 *src = addr;
	mm_segment_t old_fs = get_fs();

	if (!maxlen)
		return;

	/*
	 * Try to get string again, since the string can be changed while
	 * probing.
	 */
	set_fs(KERNEL_DS);
	pagefault_disable();

	/* Copy byte-by-byte until NUL, fault, or maxlen bytes copied. */
	do
		ret = __copy_from_user_inatomic(dst++, src++, 1);
	while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);

	/* Force termination in case the loop stopped at maxlen. */
	dst[-1] = '\0';
	pagefault_enable();
	set_fs(old_fs);

	/*
	 * NOTE(review): __copy_from_user_inatomic() returns the number of
	 * bytes NOT copied (>= 0), so "ret < 0" may never be true and the
	 * failure branch may be dead — verify against the copy helpers.
	 */
	if (ret < 0) {	/* Failed to fetch string */
		((u8 *)get_rloc_data(dest))[0] = '\0';
		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
	} else {
		/* Record the copied length (incl. NUL) at the same offset. */
		*(u32 *)dest = make_data_rloc(src - (u8 *)addr,
					      get_rloc_offs(*(u32 *)dest));
	}
}

/*
 * Return the length of string -- including null terminal byte.
 *
 * Fix: @c was read uninitialized in the loop condition when the very
 * first __copy_from_user_inatomic() faulted (undefined behavior).  It
 * is now zero-initialized and @ret is tested before @c so a faulting
 * copy terminates the loop without inspecting the byte.
 */
static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
							void *addr, void *dest)
{
	mm_segment_t old_fs;
	int ret, len = 0;
	u8 c = 0;	/* must be initialized: read in the loop condition */

	old_fs = get_fs();
	set_fs(KERNEL_DS);
	pagefault_disable();

	do {
		ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
		len++;
	} while (ret == 0 && c && len < MAX_STRING_SIZE);

	pagefault_enable();
	set_fs(old_fs);

	if (ret < 0)	/* Failed to check the length */
		*(u32 *)dest = 0;
	else
		*(u32 *)dest = len;
}

223 224 225 226 227 228 229 230 231 232 233 234 235 236
/* Fetch a @type value via a pre-resolved symbol_cache (@data). */
#define DEFINE_FETCH_symbol(type)					\
__kprobes void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs,	\
					  void *data, void *dest)	\
{									\
	struct symbol_cache *sc = data;					\
	if (sc->addr)							\
		fetch_memory_##type(regs, (void *)sc->addr, dest);	\
	else								\
		*(type *)dest = 0;					\
}
DEFINE_BASIC_FETCH_FUNCS(symbol)
DEFINE_FETCH_symbol(string)
DEFINE_FETCH_symbol(string_size)

237 238 239 240 241 242 243 244
/* kprobes don't support file_offset fetch methods */
#define fetch_file_offset_u8		NULL
#define fetch_file_offset_u16		NULL
#define fetch_file_offset_u32		NULL
#define fetch_file_offset_u64		NULL
#define fetch_file_offset_string	NULL
#define fetch_file_offset_string_size	NULL

245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264
/* Fetch type information table */
const struct fetch_type kprobes_fetch_type_table[] = {
	/* Special types */
	[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
					sizeof(u32), 1, "__data_loc char[]"),
	[FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
					string_size, sizeof(u32), 0, "u32"),
	/* Basic types */
	ASSIGN_FETCH_TYPE(u8,  u8,  0),
	ASSIGN_FETCH_TYPE(u16, u16, 0),
	ASSIGN_FETCH_TYPE(u32, u32, 0),
	ASSIGN_FETCH_TYPE(u64, u64, 0),
	ASSIGN_FETCH_TYPE(s8,  u8,  1),
	ASSIGN_FETCH_TYPE(s16, u16, 1),
	ASSIGN_FETCH_TYPE(s32, u32, 1),
	ASSIGN_FETCH_TYPE(s64, u64, 1),

	ASSIGN_FETCH_TYPE_END
};

265 266 267
/*
 * Allocate new trace_probe and initialize it (including kprobes).
 *
 * @group/@event name the event (validated with is_good_name());
 * exactly one of @addr or @symbol(+@offs) identifies the probe point;
 * @nargs sizes the trailing args array; @is_return selects kretprobe.
 * Returns the new object or an ERR_PTR (-ENOMEM/-EINVAL).
 */
static struct trace_kprobe *alloc_trace_kprobe(const char *group,
					     const char *event,
					     void *addr,
					     const char *symbol,
					     unsigned long offs,
					     int nargs, bool is_return)
{
	struct trace_kprobe *tk;
	int ret = -ENOMEM;

	tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
	if (!tk)
		return ERR_PTR(ret);

	if (symbol) {
		/* Keep our own copy; the kprobe references it by pointer. */
		tk->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tk->symbol)
			goto error;
		tk->rp.kp.symbol_name = tk->symbol;
		tk->rp.kp.offset = offs;
	} else
		tk->rp.kp.addr = addr;

	if (is_return)
		tk->rp.handler = kretprobe_dispatcher;
	else
		tk->rp.kp.pre_handler = kprobe_dispatcher;

	if (!event || !is_good_name(event)) {
		ret = -EINVAL;
		goto error;
	}

	tk->tp.call.class = &tk->tp.class;
	tk->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tk->tp.call.name)
		goto error;

	if (!group || !is_good_name(group)) {
		ret = -EINVAL;
		goto error;
	}

	tk->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tk->tp.class.system)
		goto error;

	INIT_LIST_HEAD(&tk->list);
	INIT_LIST_HEAD(&tk->tp.files);
	return tk;
error:
	/* kfree(NULL) is a no-op, so partially-built state is safe here. */
	kfree(tk->tp.call.name);
	kfree(tk->symbol);
	kfree(tk);
	return ERR_PTR(ret);
}

325
/* Free a trace_kprobe and everything it owns (args, names, symbol). */
static void free_trace_kprobe(struct trace_kprobe *tk)
{
	int i;

	for (i = 0; i < tk->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tk->tp.args[i]);

	kfree(tk->tp.call.class->system);
	kfree(tk->tp.call.name);
	kfree(tk->symbol);
	kfree(tk);
}

338 339
/*
 * Look up a probe by event and group name on probe_list.
 * NOTE(review): callers appear to hold probe_lock around this walk —
 * confirm before adding new call sites.
 */
static struct trace_kprobe *find_trace_kprobe(const char *event,
					      const char *group)
{
	struct trace_kprobe *tk;

	list_for_each_entry(tk, &probe_list, list)
		if (strcmp(tk->tp.call.name, event) == 0 &&
		    strcmp(tk->tp.call.class->system, group) == 0)
			return tk;
	return NULL;
}

350 351 352 353 354
/*
 * Enable trace_probe
 * if the file is NULL, enable "perf" handler, or enable "trace" handler.
 *
 * Returns 0 on success, -ENOMEM if the per-file link cannot be
 * allocated, or the error from enable_k(ret)probe().
 */
static int
enable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file)
{
	int ret = 0;

	if (file) {
		struct event_file_link *link;

		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			ret = -ENOMEM;
			goto out;
		}

		link->file = file;
		/* RCU add: trace handlers iterate tp.files locklessly. */
		list_add_tail_rcu(&link->list, &tk->tp.files);

		tk->tp.flags |= TP_FLAG_TRACE;
	} else
		tk->tp.flags |= TP_FLAG_PROFILE;

	/* Only arm the k*probe if registered and its target still exists. */
	if (trace_probe_is_registered(&tk->tp) && !trace_kprobe_has_gone(tk)) {
		if (trace_kprobe_is_return(tk))
			ret = enable_kretprobe(&tk->rp);
		else
			ret = enable_kprobe(&tk->rp.kp);
	}
 out:
	return ret;
}

385 386 387 388 389
/*
 * Disable trace_probe
 * if the file is NULL, disable "perf" handler, or disable "trace" handler.
 *
 * Returns 0 on success, -EINVAL when @file has no link on this probe.
 */
static int
disable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file)
{
	struct event_file_link *link = NULL;
	int wait = 0;
	int ret = 0;

	if (file) {
		link = find_event_file_link(&tk->tp, file);
		if (!link) {
			ret = -EINVAL;
			goto out;
		}

		list_del_rcu(&link->list);
		wait = 1;	/* must sync before freeing the link below */
		if (!list_empty(&tk->tp.files))
			goto out;	/* other files still use the probe */

		tk->tp.flags &= ~TP_FLAG_TRACE;
	} else
		tk->tp.flags &= ~TP_FLAG_PROFILE;

	/* Last user gone: actually disarm the k*probe. */
	if (!trace_probe_is_enabled(&tk->tp) && trace_probe_is_registered(&tk->tp)) {
		if (trace_kprobe_is_return(tk))
			disable_kretprobe(&tk->rp);
		else
			disable_kprobe(&tk->rp.kp);
		wait = 1;
	}
 out:
	if (wait) {
		/*
		 * Synchronize with kprobe_trace_func/kretprobe_trace_func
		 * to ensure disabled (all running handlers are finished).
		 * This is not only for kfree(), but also the caller,
		 * trace_remove_event_call() supposes it for releasing
		 * event_call related objects, which will be accessed in
		 * the kprobe_trace_func/kretprobe_trace_func.
		 */
		synchronize_sched();
		kfree(link);	/* Ignored if link == NULL */
	}

	return ret;
}

436
/* Internal register function - just handle k*probes and flags */
437
static int __register_trace_kprobe(struct trace_kprobe *tk)
438
{
439
	int i, ret;
440

441
	if (trace_probe_is_registered(&tk->tp))
442 443
		return -EINVAL;

444 445
	for (i = 0; i < tk->tp.nr_args; i++)
		traceprobe_update_arg(&tk->tp.args[i]);
446

447
	/* Set/clear disabled flag according to tp->flag */
448 449
	if (trace_probe_is_enabled(&tk->tp))
		tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
450
	else
451
		tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;
452

453 454
	if (trace_kprobe_is_return(tk))
		ret = register_kretprobe(&tk->rp);
455
	else
456
		ret = register_kprobe(&tk->rp.kp);
457 458

	if (ret == 0)
459
		tk->tp.flags |= TP_FLAG_REGISTERED;
460 461
	else {
		pr_warning("Could not insert probe at %s+%lu: %d\n",
462 463
			   trace_kprobe_symbol(tk), trace_kprobe_offset(tk), ret);
		if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) {
464 465 466 467 468 469
			pr_warning("This probe might be able to register after"
				   "target module is loaded. Continue.\n");
			ret = 0;
		} else if (ret == -EILSEQ) {
			pr_warning("Probing address(0x%p) is not an "
				   "instruction boundary.\n",
470
				   tk->rp.kp.addr);
471 472 473 474 475 476 477 478
			ret = -EINVAL;
		}
	}

	return ret;
}

/* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_kprobe(struct trace_kprobe *tk)
{
	if (trace_probe_is_registered(&tk->tp)) {
		if (trace_kprobe_is_return(tk))
			unregister_kretprobe(&tk->rp);
		else
			unregister_kprobe(&tk->rp.kp);
		tk->tp.flags &= ~TP_FLAG_REGISTERED;
		/* Cleanup kprobe for reuse */
		if (tk->rp.kp.symbol_name)
			tk->rp.kp.addr = NULL;	/* re-resolve on next register */
	}
}

/* Unregister a trace_probe and probe_event: call with locking probe_lock */
static int unregister_trace_kprobe(struct trace_kprobe *tk)
{
	/* Enabled event can not be unregistered */
	if (trace_probe_is_enabled(&tk->tp))
		return -EBUSY;

	/* Will fail if probe is being used by ftrace or perf */
	if (unregister_kprobe_event(tk))
		return -EBUSY;

	__unregister_trace_kprobe(tk);
	list_del(&tk->list);

	return 0;
}

/* Register a trace_probe and probe_event */
static int register_trace_kprobe(struct trace_kprobe *tk)
{
	struct trace_kprobe *old_tk;
	int ret;

	mutex_lock(&probe_lock);

	/* Delete old (same name) event if exist */
	old_tk = find_trace_kprobe(tk->tp.call.name, tk->tp.call.class->system);
	if (old_tk) {
		ret = unregister_trace_kprobe(old_tk);
		if (ret < 0)
			goto end;	/* old event busy: keep it, fail */
		free_trace_kprobe(old_tk);
	}

	/* Register new event */
	ret = register_kprobe_event(tk);
	if (ret) {
		pr_warning("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	/* Register k*probe */
	ret = __register_trace_kprobe(tk);
	if (ret < 0)
		unregister_kprobe_event(tk);	/* roll back the event */
	else
		list_add_tail(&tk->list, &probe_list);

end:
	mutex_unlock(&probe_lock);
	return ret;
}

546
/* Module notifier call back, checking event on the module */
547
static int trace_kprobe_module_callback(struct notifier_block *nb,
548 549 550
				       unsigned long val, void *data)
{
	struct module *mod = data;
551
	struct trace_kprobe *tk;
552 553 554 555 556 557 558
	int ret;

	if (val != MODULE_STATE_COMING)
		return NOTIFY_DONE;

	/* Update probes on coming module */
	mutex_lock(&probe_lock);
559 560
	list_for_each_entry(tk, &probe_list, list) {
		if (trace_kprobe_within_module(tk, mod)) {
561
			/* Don't need to check busy - this should have gone. */
562 563
			__unregister_trace_kprobe(tk);
			ret = __register_trace_kprobe(tk);
564 565 566
			if (ret)
				pr_warning("Failed to re-register probe %s on"
					   "%s: %d\n",
567
					   tk->tp.call.name, mod->name, ret);
568 569 570 571 572 573 574
		}
	}
	mutex_unlock(&probe_lock);

	return NOTIFY_DONE;
}

575 576
/* Re-registers module-scoped probes when their module is loaded. */
static struct notifier_block trace_kprobe_module_nb = {
	.notifier_call = trace_kprobe_module_callback,
	.priority = 1	/* Invoked after kprobe module callback */
};

580
/*
 * Parse one kprobe_events command line and create/delete the event.
 * Returns 0 on success or a negative errno on parse/registration error.
 */
static int create_trace_kprobe(int argc, char **argv)
{
	/*
	 * Argument syntax:
	 *  - Add kprobe: p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe: r[:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
	 * Fetch args:
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth of stack (N:0-)
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG	: fetch register REG
	 * Dereferencing memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 * Type of args:
	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
	 */
	struct trace_kprobe *tk;
	int i, ret = 0;
	bool is_return = false, is_delete = false;
	char *symbol = NULL, *event = NULL, *group = NULL;
	char *arg;
	unsigned long offset = 0;
	void *addr = NULL;
	char buf[MAX_EVENT_NAME_LEN];

	/* argc must be >= 1 */
	if (argv[0][0] == 'p')
		is_return = false;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] == '-')
		is_delete = true;
	else {
		pr_info("Probe definition must be started with 'p', 'r' or"
			" '-'.\n");
		return -EINVAL;
	}

	/* Optional ":GRP/EVENT" or ":EVENT" suffix on the command char. */
	if (argv[0][1] == ':') {
		event = &argv[0][2];
		if (strchr(event, '/')) {
			group = event;
			event = strchr(group, '/') + 1;
			event[-1] = '\0';	/* split GRP and EVENT */
			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = KPROBE_EVENT_SYSTEM;

	if (is_delete) {
		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&probe_lock);
		tk = find_trace_kprobe(event, group);
		if (!tk) {
			mutex_unlock(&probe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_kprobe(tk);
		if (ret == 0)
			free_trace_kprobe(tk);
		mutex_unlock(&probe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	if (isdigit(argv[1][0])) {
		if (is_return) {
			pr_info("Return probe point must be a symbol.\n");
			return -EINVAL;
		}
		/* an address specified */
		ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
		if (ret) {
			pr_info("Failed to parse address.\n");
			return ret;
		}
	} else {
		/* a symbol specified */
		symbol = argv[1];
		/* TODO: support .init module functions */
		ret = traceprobe_split_symbol_offset(symbol, &offset);
		if (ret) {
			pr_info("Failed to parse symbol.\n");
			return ret;
		}
		if (offset && is_return) {
			pr_info("Return probe must be used without offset.\n");
			return -EINVAL;
		}
	}
	argc -= 2; argv += 2;	/* remaining argv[] are fetch args */

	/* setup a probe */
	if (!event) {
		/* Make a new event name */
		if (symbol)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
				 is_return ? 'r' : 'p', symbol, offset);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
				 is_return ? 'r' : 'p', addr);
		event = buf;
	}
	tk = alloc_trace_kprobe(group, event, addr, symbol, offset, argc,
			       is_return);
	if (IS_ERR(tk)) {
		pr_info("Failed to allocate trace_probe.(%d)\n",
			(int)PTR_ERR(tk));
		return PTR_ERR(tk);
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		/* Increment count for freeing args in error case */
		tk->tp.nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			parg->name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			parg->name = kstrdup(buf, GFP_KERNEL);
		}

		if (!parg->name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(parg->name)) {
			pr_info("Invalid argument[%d] name: %s\n",
				i, parg->name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(parg->name,
							tk->tp.args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg,
						is_return, true);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_kprobe(tk);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_kprobe(tk);
	return ret;
}

771
/*
 * Remove and free every probe on probe_list; fails with -EBUSY if any
 * probe is still enabled or pinned by ftrace/perf.
 */
static int release_all_trace_kprobes(void)
{
	struct trace_kprobe *tk;
	int ret = 0;

	mutex_lock(&probe_lock);
	/* Ensure no probe is in use. */
	list_for_each_entry(tk, &probe_list, list)
		if (trace_probe_is_enabled(&tk->tp)) {
			ret = -EBUSY;
			goto end;
		}
	/* TODO: Use batch unregistration */
	while (!list_empty(&probe_list)) {
		tk = list_entry(probe_list.next, struct trace_kprobe, list);
		ret = unregister_trace_kprobe(tk);
		if (ret)
			goto end;
		free_trace_kprobe(tk);
	}

end:
	mutex_unlock(&probe_lock);

	return ret;
}

/* Probes listing interfaces */
/* seq_file start: holds probe_lock until probes_seq_stop(). */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&probe_lock);
	return seq_list_start(&probe_list, *pos);
}

/* seq_file next: advance to the following probe on the list. */
static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &probe_list, pos);
}

/* seq_file stop: releases the lock taken in probes_seq_start(). */
static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&probe_lock);
}

/* Print one probe definition in the same syntax used to create it. */
static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_kprobe *tk = v;
	int i;

	seq_printf(m, "%c", trace_kprobe_is_return(tk) ? 'r' : 'p');
	seq_printf(m, ":%s/%s", tk->tp.call.class->system, tk->tp.call.name);

	if (!tk->symbol)
		seq_printf(m, " 0x%p", tk->rp.kp.addr);
	else if (tk->rp.kp.offset)
		seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
			   tk->rp.kp.offset);
	else
		seq_printf(m, " %s", trace_kprobe_symbol(tk));

	for (i = 0; i < tk->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
	seq_printf(m, "\n");

	return 0;
}

/* seq_file operations for the kprobe_events listing. */
static const struct seq_operations probes_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_seq_show
};

/*
 * Open kprobe_events; O_TRUNC for write clears all existing probes
 * first (fails with -EBUSY if any probe is in use).
 */
static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = release_all_trace_kprobes();
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

/* Feed written command lines to create_trace_kprobe(), one per line. */
static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return traceprobe_probes_write(file, buffer, count, ppos,
			create_trace_kprobe);
}

/* File operations for .../tracing/kprobe_events */
static const struct file_operations kprobe_events_ops = {
	.owner          = THIS_MODULE,
	.open           = probes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
	.write		= probes_write,
};

874 875 876
/* Probes profiling interfaces */
/* Print one line of hit/miss statistics for a probe. */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_kprobe *tk = v;

	seq_printf(m, "  %-44s %15lu %15lu\n", tk->tp.call.name, tk->nhit,
		   tk->rp.kp.nmissed);

	return 0;
}

/* seq_file operations for kprobe_profile: same list walk, stats show. */
static const struct seq_operations profile_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_profile_seq_show
};

/* Open .../tracing/kprobe_profile (read-only statistics). */
static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations kprobe_profile_ops = {
	.owner          = THIS_MODULE,
	.open           = profile_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

905
/* Kprobe handler */
/*
 * Record one kprobe hit into @ftrace_file's ring buffer: reserve an
 * event sized for the entry head plus the probe's fetch data, fill it,
 * and commit (running any event triggers).
 */
static __kprobes void
__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
		    struct ftrace_event_file *ftrace_file)
{
	struct kprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, dsize, pc;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tk->tp.call;

	WARN_ON(call != ftrace_file->event_call);

	if (ftrace_trigger_soft_disabled(ftrace_file))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	/* Dynamic (string) data size depends on current register state. */
	dsize = __get_data_size(&tk->tp, regs);
	size = sizeof(*entry) + tk->tp.size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->ip = (unsigned long)tk->rp.kp.addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);

	event_trigger_unlock_commit_regs(ftrace_file, buffer, event,
					 entry, irq_flags, pc, regs);
}

942
/* Fan a kprobe hit out to every ftrace file attached to this probe. */
static __kprobes void
kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct event_file_link *link;

	/* RCU walk: paired with list_add/del_rcu in enable/disable. */
	list_for_each_entry_rcu(link, &tk->tp.files, list)
		__kprobe_trace_func(tk, regs, link->file);
}

951
/* Kretprobe handler */
/*
 * Record one function-return hit: like __kprobe_trace_func() but the
 * entry also carries the return address taken from @ri.
 */
static __kprobes void
__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		       struct pt_regs *regs,
		       struct ftrace_event_file *ftrace_file)
{
	struct kretprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, pc, dsize;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tk->tp.call;

	WARN_ON(call != ftrace_file->event_call);

	if (ftrace_trigger_soft_disabled(ftrace_file))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	/* Dynamic (string) data size depends on current register state. */
	dsize = __get_data_size(&tk->tp, regs);
	size = sizeof(*entry) + tk->tp.size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);

	event_trigger_unlock_commit_regs(ftrace_file, buffer, event,
					 entry, irq_flags, pc, regs);
}

990
/* Fan a kretprobe hit out to every attached ftrace file. */
static __kprobes void
kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		     struct pt_regs *regs)
{
	struct event_file_link *link;

	/* RCU walk: paired with list_add/del_rcu in enable/disable. */
	list_for_each_entry_rcu(link, &tk->tp.files, list)
		__kretprobe_trace_func(tk, ri, regs, link->file);
}

1000
/* Event entry printers */
/* Format a recorded kprobe entry as "EVENT: (ip) arg=val ...". */
static enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct kprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	/* Fetched argument data follows the fixed entry head. */
	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

1037
/* Format a recorded kretprobe entry as "EVENT: (ret_ip <- func) ...". */
static enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags,
		      struct trace_event *event)
{
	struct kretprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kretprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, " <- "))
		goto partial;

	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	/* Fetched argument data follows the fixed entry head. */
	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}


/* Define the ftrace fields of a kprobe event: ip plus each probe arg. */
static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kprobe_trace_entry_head field;
	struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tk->tp.nr_args; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		/* Args live after the entry head, at their fetch offsets. */
		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name,
					 sizeof(field) + parg->offset,
					 parg->type->size,
					 parg->type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

/* Define the ftrace fields of a kretprobe event: func, ret_ip, args. */
static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kretprobe_trace_entry_head field;
	struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tk->tp.nr_args; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		/* Args live after the entry head, at their fetch offsets. */
		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name,
					 sizeof(field) + parg->offset,
					 parg->type->size,
					 parg->type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

1127
#ifdef CONFIG_PERF_EVENTS
1128 1129

/* Kprobe profile handler */
1130
static __kprobes void
1131
kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
1132
{
1133
	struct ftrace_event_call *call = &tk->tp.call;
1134
	struct kprobe_trace_entry_head *entry;
1135
	struct hlist_head *head;
1136
	int size, __size, dsize;
1137
	int rctx;
1138

1139 1140 1141 1142
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

1143 1144
	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
1145 1146
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
1147

S
Steven Rostedt 已提交
1148
	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
1149
	if (!entry)
1150
		return;
1151

1152
	entry->ip = (unsigned long)tk->rp.kp.addr;
1153
	memset(&entry[1], 0, dsize);
1154
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
1155
	perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
1156 1157 1158
}

/* Kretprobe profile handler */
1159
static __kprobes void
1160
kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1161
		    struct pt_regs *regs)
1162
{
1163
	struct ftrace_event_call *call = &tk->tp.call;
1164
	struct kretprobe_trace_entry_head *entry;
1165
	struct hlist_head *head;
1166
	int size, __size, dsize;
1167
	int rctx;
1168

1169 1170 1171 1172
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

1173 1174
	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
1175 1176
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
1177

S
Steven Rostedt 已提交
1178
	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
1179
	if (!entry)
1180
		return;
1181

1182
	entry->func = (unsigned long)tk->rp.kp.addr;
1183
	entry->ret_ip = (unsigned long)ri->ret_addr;
1184
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
1185
	perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
1186
}
1187
#endif	/* CONFIG_PERF_EVENTS */
1188

1189 1190 1191 1192 1193 1194
/*
 * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
 *
 * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
 * lockless, but we can't race with this __init function.
 */
1195
static __kprobes
1196 1197
int kprobe_register(struct ftrace_event_call *event,
		    enum trace_reg type, void *data)
1198
{
1199
	struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
1200
	struct ftrace_event_file *file = data;
1201

1202 1203
	switch (type) {
	case TRACE_REG_REGISTER:
1204
		return enable_trace_kprobe(tk, file);
1205
	case TRACE_REG_UNREGISTER:
1206
		return disable_trace_kprobe(tk, file);
1207 1208 1209

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
1210
		return enable_trace_kprobe(tk, NULL);
1211
	case TRACE_REG_PERF_UNREGISTER:
1212
		return disable_trace_kprobe(tk, NULL);
1213 1214
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
1215 1216
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
1217
		return 0;
1218 1219 1220 1221
#endif
	}
	return 0;
}
1222 1223 1224 1225

static __kprobes
int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
1226
	struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
1227

1228
	tk->nhit++;
1229

1230 1231
	if (tk->tp.flags & TP_FLAG_TRACE)
		kprobe_trace_func(tk, regs);
1232
#ifdef CONFIG_PERF_EVENTS
1233 1234
	if (tk->tp.flags & TP_FLAG_PROFILE)
		kprobe_perf_func(tk, regs);
1235
#endif
1236 1237 1238 1239 1240 1241
	return 0;	/* We don't tweek kernel, so just return 0 */
}

static __kprobes
int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
1242
	struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);
1243

1244
	tk->nhit++;
1245

1246 1247
	if (tk->tp.flags & TP_FLAG_TRACE)
		kretprobe_trace_func(tk, ri, regs);
1248
#ifdef CONFIG_PERF_EVENTS
1249 1250
	if (tk->tp.flags & TP_FLAG_PROFILE)
		kretprobe_perf_func(tk, ri, regs);
1251
#endif
1252 1253
	return 0;	/* We don't tweek kernel, so just return 0 */
}
1254

1255 1256 1257 1258 1259 1260 1261 1262
static struct trace_event_functions kretprobe_funcs = {
	.trace		= print_kretprobe_event
};

static struct trace_event_functions kprobe_funcs = {
	.trace		= print_kprobe_event
};

1263
static int register_kprobe_event(struct trace_kprobe *tk)
1264
{
1265
	struct ftrace_event_call *call = &tk->tp.call;
1266 1267 1268
	int ret;

	/* Initialize ftrace_event_call */
1269
	INIT_LIST_HEAD(&call->class->fields);
1270
	if (trace_kprobe_is_return(tk)) {
1271
		call->event.funcs = &kretprobe_funcs;
1272
		call->class->define_fields = kretprobe_event_define_fields;
1273
	} else {
1274
		call->event.funcs = &kprobe_funcs;
1275
		call->class->define_fields = kprobe_event_define_fields;
1276
	}
1277
	if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0)
1278
		return -ENOMEM;
1279 1280
	ret = register_ftrace_event(&call->event);
	if (!ret) {
1281
		kfree(call->print_fmt);
1282
		return -ENODEV;
1283
	}
1284
	call->flags = 0;
1285
	call->class->reg = kprobe_register;
1286
	call->data = tk;
1287
	ret = trace_add_event_call(call);
1288
	if (ret) {
1289
		pr_info("Failed to register kprobe event: %s\n", call->name);
1290
		kfree(call->print_fmt);
1291
		unregister_ftrace_event(&call->event);
1292
	}
1293 1294 1295
	return ret;
}

1296
static int unregister_kprobe_event(struct trace_kprobe *tk)
1297
{
1298 1299
	int ret;

1300
	/* tp->event is unregistered in trace_remove_event_call() */
1301
	ret = trace_remove_event_call(&tk->tp.call);
1302
	if (!ret)
1303
		kfree(tk->tp.call.print_fmt);
1304
	return ret;
1305 1306
}

L
Lucas De Marchi 已提交
1307
/* Make a debugfs interface for controlling probe points */
1308 1309 1310 1311 1312
static __init int init_kprobe_trace(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

1313
	if (register_module_notifier(&trace_kprobe_module_nb))
1314 1315
		return -EINVAL;

1316 1317 1318 1319 1320 1321 1322
	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
				    NULL, &kprobe_events_ops);

1323
	/* Event list interface */
1324 1325 1326
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_events' entry\n");
1327 1328 1329 1330 1331 1332 1333 1334

	/* Profile interface */
	entry = debugfs_create_file("kprobe_profile", 0444, d_tracer,
				    NULL, &kprobe_profile_ops);

	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_profile' entry\n");
1335 1336 1337 1338 1339 1340 1341
	return 0;
}
fs_initcall(init_kprobe_trace);


#ifdef CONFIG_FTRACE_STARTUP_TEST

1342 1343 1344 1345 1346 1347
/*
 * The "__used" keeps gcc from removing the function symbol
 * from the kallsyms table.
 */
static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
					       int a4, int a5, int a6)
1348 1349 1350 1351
{
	return a1 + a2 + a3 + a4 + a5 + a6;
}

1352
static struct ftrace_event_file *
1353
find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
1354 1355 1356 1357
{
	struct ftrace_event_file *file;

	list_for_each_entry(file, &tr->events, list)
1358
		if (file->event_call == &tk->tp.call)
1359 1360 1361 1362 1363
			return file;

	return NULL;
}

1364
/*
1365
 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
1366 1367
 * stage, we can do this lockless.
 */
1368 1369
static __init int kprobe_trace_self_tests_init(void)
{
1370
	int ret, warn = 0;
1371
	int (*target)(int, int, int, int, int, int);
1372
	struct trace_kprobe *tk;
1373
	struct ftrace_event_file *file;
1374 1375 1376 1377 1378

	target = kprobe_trace_selftest_target;

	pr_info("Testing kprobe tracing: ");

1379 1380
	ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target "
				  "$stack $stack0 +0($stack)",
1381
				  create_trace_kprobe);
1382
	if (WARN_ON_ONCE(ret)) {
1383
		pr_warn("error on probing function entry.\n");
1384 1385 1386
		warn++;
	} else {
		/* Enable trace point */
1387 1388
		tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tk == NULL)) {
1389
			pr_warn("error on getting new probe.\n");
1390
			warn++;
1391
		} else {
1392
			file = find_trace_probe_file(tk, top_trace_array());
1393 1394 1395 1396
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
1397
				enable_trace_kprobe(tk, file);
1398
		}
1399
	}
1400

1401
	ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
1402
				  "$retval", create_trace_kprobe);
1403
	if (WARN_ON_ONCE(ret)) {
1404
		pr_warn("error on probing function return.\n");
1405 1406 1407
		warn++;
	} else {
		/* Enable trace point */
1408 1409
		tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tk == NULL)) {
1410
			pr_warn("error on getting 2nd new probe.\n");
1411
			warn++;
1412
		} else {
1413
			file = find_trace_probe_file(tk, top_trace_array());
1414 1415 1416 1417
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
1418
				enable_trace_kprobe(tk, file);
1419
		}
1420 1421 1422 1423
	}

	if (warn)
		goto end;
1424 1425 1426

	ret = target(1, 2, 3, 4, 5, 6);

1427
	/* Disable trace points before removing it */
1428 1429
	tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tk == NULL)) {
1430
		pr_warn("error on getting test probe.\n");
1431
		warn++;
1432
	} else {
1433
		file = find_trace_probe_file(tk, top_trace_array());
1434 1435 1436 1437
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
1438
			disable_trace_kprobe(tk, file);
1439
	}
1440

1441 1442
	tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tk == NULL)) {
1443
		pr_warn("error on getting 2nd test probe.\n");
1444
		warn++;
1445
	} else {
1446
		file = find_trace_probe_file(tk, top_trace_array());
1447 1448 1449 1450
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
1451
			disable_trace_kprobe(tk, file);
1452
	}
1453

1454
	ret = traceprobe_command("-:testprobe", create_trace_kprobe);
1455
	if (WARN_ON_ONCE(ret)) {
1456
		pr_warn("error on deleting a probe.\n");
1457 1458 1459
		warn++;
	}

1460
	ret = traceprobe_command("-:testprobe2", create_trace_kprobe);
1461
	if (WARN_ON_ONCE(ret)) {
1462
		pr_warn("error on deleting a probe.\n");
1463 1464
		warn++;
	}
1465

1466
end:
1467
	release_all_trace_kprobes();
1468 1469 1470 1471
	if (warn)
		pr_cont("NG: Some tests are failed. Please check them.\n");
	else
		pr_cont("OK\n");
1472 1473 1474 1475 1476 1477
	return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif