/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define pr_fmt(fmt)	"trace_kprobe: " fmt

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/rculist.h>

#include "trace_probe.h"

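/* Default event group, used when a probe definition does not give "GRP/" */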
#define KPROBE_EVENT_SYSTEM "kprobes"
#define KRETPROBE_MAXACTIVE_MAX 4096

/*
 * Kprobe event core functions
 */
struct trace_kprobe {
	struct list_head	list;
	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
	unsigned long __percpu *nhit;
	const char		*symbol;	/* symbol name */
	struct trace_probe	tp;
};

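/*
 * tp ends in a flexible array of probe_arg (tp.args), so the size of a
 * trace_kprobe carrying n arguments is computed from that member's offset.
 */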
#define SIZEOF_TRACE_KPROBE(n)				\
	(offsetof(struct trace_kprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))


static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
{
	return tk->rp.handler != NULL;
}

static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
{
	return tk->symbol ? tk->symbol : "unknown";
}

static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
{
	return tk->rp.kp.offset;
}

static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
{
	return !!(kprobe_gone(&tk->rp.kp));
}

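/*
 * Module-scoped symbols are written as "MODNAME:SYMBOL" (e.g.
 * "ext4:ext4_sync_file"), so a prefix compare up to the ':' is enough.
 */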
static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
						 struct module *mod)
{
	int len = strlen(mod->name);
	const char *name = trace_kprobe_symbol(tk);
	return strncmp(mod->name, name, len) == 0 && name[len] == ':';
}

static nokprobe_inline bool trace_kprobe_is_on_module(struct trace_kprobe *tk)
{
	return !!strchr(trace_kprobe_symbol(tk), ':');
}

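/* Sum the per-CPU hit counters; the racy read is fine for statistics. */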
static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
{
	unsigned long nhit = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		nhit += *per_cpu_ptr(tk->nhit, cpu);

	return nhit;
}

static int register_kprobe_event(struct trace_kprobe *tk);
static int unregister_kprobe_event(struct trace_kprobe *tk);

static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
				struct pt_regs *regs);

/* Memory fetching by symbol */
struct symbol_cache {
	char		*symbol;
	long		offset;
	unsigned long	addr;
};
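
/*
 * A symbol_cache resolves SYM+offset through kallsyms once and caches the
 * address, so @SYM fetch arguments need no lookup on every probe hit;
 * update_symbol_cache() refreshes the address (e.g. when a module comes in).
 */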

unsigned long update_symbol_cache(struct symbol_cache *sc)
{
	sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);

	if (sc->addr)
		sc->addr += sc->offset;

	return sc->addr;
}

void free_symbol_cache(struct symbol_cache *sc)
{
	kfree(sc->symbol);
	kfree(sc);
}

struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
{
	struct symbol_cache *sc;

	if (!sym || strlen(sym) == 0)
		return NULL;

	sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->symbol = kstrdup(sym, GFP_KERNEL);
	if (!sc->symbol) {
		kfree(sc);
		return NULL;
	}
	sc->offset = offset;
	update_symbol_cache(sc);

	return sc;
}

/*
 * Kprobes-specific fetch functions
 */
#define DEFINE_FETCH_stack(type)					\
static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,		\
					  void *offset, void *dest)	\
{									\
	*(type *)dest = (type)regs_get_kernel_stack_nth(regs,		\
				(unsigned int)((unsigned long)offset));	\
}									\
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(stack, type));

DEFINE_BASIC_FETCH_FUNCS(stack)
/* No string on the stack entry */
#define fetch_stack_string	NULL
#define fetch_stack_string_size	NULL

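/*
 * Memory fetches go through probe_kernel_address(), so a faulting address
 * stores 0 in *dest instead of oopsing.
 */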
#define DEFINE_FETCH_memory(type)					\
static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,		\
					  void *addr, void *dest)	\
{									\
	type retval;							\
	if (probe_kernel_address(addr, retval))				\
		*(type *)dest = 0;					\
	else								\
		*(type *)dest = retval;					\
}									\
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, type));

DEFINE_BASIC_FETCH_FUNCS(memory)
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
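/*
 * The u32 at *dest is a "relative data location": the upper 16 bits hold
 * the (maximum, then actual) string length and the lower 16 bits the
 * offset of the string data, as packed by make_data_rloc() in trace_probe.h.
 */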
static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
					    void *addr, void *dest)
{
	int maxlen = get_rloc_len(*(u32 *)dest);
	u8 *dst = get_rloc_data(dest);
	long ret;

	if (!maxlen)
		return;

	/*
	 * Try to get string again, since the string can be changed while
	 * probing.
	 */
	ret = strncpy_from_unsafe(dst, addr, maxlen);

	if (ret < 0) {	/* Failed to fetch string */
		dst[0] = '\0';
		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
	} else {
		*(u32 *)dest = make_data_rloc(ret, get_rloc_offs(*(u32 *)dest));
	}
}
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string));

/* Return the length of the string -- including the terminating null byte */
static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
						 void *addr, void *dest)
{
	mm_segment_t old_fs;
	int ret, len = 0;
	u8 c;

	old_fs = get_fs();
	set_fs(KERNEL_DS);
	pagefault_disable();

	do {
		ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
		len++;
	} while (c && ret == 0 && len < MAX_STRING_SIZE);

	pagefault_enable();
	set_fs(old_fs);

	if (ret < 0)	/* Failed to check the length */
		*(u32 *)dest = 0;
	else
		*(u32 *)dest = len;
}
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string_size));

#define DEFINE_FETCH_symbol(type)					\
void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs, void *data, void *dest)\
{									\
	struct symbol_cache *sc = data;					\
	if (sc->addr)							\
		fetch_memory_##type(regs, (void *)sc->addr, dest);	\
	else								\
		*(type *)dest = 0;					\
}									\
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(symbol, type));

DEFINE_BASIC_FETCH_FUNCS(symbol)
DEFINE_FETCH_symbol(string)
DEFINE_FETCH_symbol(string_size)

/* kprobes don't support file_offset fetch methods */
#define fetch_file_offset_u8		NULL
#define fetch_file_offset_u16		NULL
#define fetch_file_offset_u32		NULL
#define fetch_file_offset_u64		NULL
#define fetch_file_offset_string	NULL
#define fetch_file_offset_string_size	NULL

/* Fetch type information table */
static const struct fetch_type kprobes_fetch_type_table[] = {
	/* Special types */
	[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
					sizeof(u32), 1, "__data_loc char[]"),
	[FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
					string_size, sizeof(u32), 0, "u32"),
	/* Basic types */
	ASSIGN_FETCH_TYPE(u8,  u8,  0),
	ASSIGN_FETCH_TYPE(u16, u16, 0),
	ASSIGN_FETCH_TYPE(u32, u32, 0),
	ASSIGN_FETCH_TYPE(u64, u64, 0),
	ASSIGN_FETCH_TYPE(s8,  u8,  1),
	ASSIGN_FETCH_TYPE(s16, u16, 1),
	ASSIGN_FETCH_TYPE(s32, u32, 1),
	ASSIGN_FETCH_TYPE(s64, u64, 1),
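	/* Hexadecimal display aliases of the unsigned types above */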
	ASSIGN_FETCH_TYPE_ALIAS(x8,  u8,  u8,  0),
	ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0),

	ASSIGN_FETCH_TYPE_END
};

/*
 * Allocate a new trace_kprobe and initialize it (including kprobes).
 */
static struct trace_kprobe *alloc_trace_kprobe(const char *group,
					     const char *event,
					     void *addr,
					     const char *symbol,
					     unsigned long offs,
					     int maxactive,
					     int nargs, bool is_return)
{
	struct trace_kprobe *tk;
	int ret = -ENOMEM;

	tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
	if (!tk)
		return ERR_PTR(ret);

	tk->nhit = alloc_percpu(unsigned long);
	if (!tk->nhit)
		goto error;

	if (symbol) {
		tk->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tk->symbol)
			goto error;
		tk->rp.kp.symbol_name = tk->symbol;
		tk->rp.kp.offset = offs;
	} else
		tk->rp.kp.addr = addr;

	if (is_return)
		tk->rp.handler = kretprobe_dispatcher;
	else
		tk->rp.kp.pre_handler = kprobe_dispatcher;

	tk->rp.maxactive = maxactive;

	if (!event || !is_good_name(event)) {
		ret = -EINVAL;
		goto error;
	}

	tk->tp.call.class = &tk->tp.class;
	tk->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tk->tp.call.name)
		goto error;

	if (!group || !is_good_name(group)) {
		ret = -EINVAL;
		goto error;
	}

	tk->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tk->tp.class.system)
		goto error;

	INIT_LIST_HEAD(&tk->list);
	INIT_LIST_HEAD(&tk->tp.files);
	return tk;
error:
	kfree(tk->tp.call.name);
	kfree(tk->symbol);
	free_percpu(tk->nhit);
	kfree(tk);
	return ERR_PTR(ret);
}

static void free_trace_kprobe(struct trace_kprobe *tk)
{
	int i;

	for (i = 0; i < tk->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tk->tp.args[i]);

	kfree(tk->tp.call.class->system);
	kfree(tk->tp.call.name);
	kfree(tk->symbol);
	free_percpu(tk->nhit);
	kfree(tk);
}

static struct trace_kprobe *find_trace_kprobe(const char *event,
					      const char *group)
{
	struct trace_kprobe *tk;

	list_for_each_entry(tk, &probe_list, list)
		if (strcmp(trace_event_name(&tk->tp.call), event) == 0 &&
		    strcmp(tk->tp.call.class->system, group) == 0)
			return tk;
	return NULL;
}

/*
 * Enable a trace_probe:
 * if file is NULL, enable the "perf" handler; otherwise enable the "trace" handler.
 */
static int
enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
	int ret = 0;

	if (file) {
		struct event_file_link *link;

		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			ret = -ENOMEM;
			goto out;
		}

		link->file = file;
		list_add_tail_rcu(&link->list, &tk->tp.files);

		tk->tp.flags |= TP_FLAG_TRACE;
	} else
		tk->tp.flags |= TP_FLAG_PROFILE;

	if (trace_probe_is_registered(&tk->tp) && !trace_kprobe_has_gone(tk)) {
		if (trace_kprobe_is_return(tk))
			ret = enable_kretprobe(&tk->rp);
		else
			ret = enable_kprobe(&tk->rp.kp);
	}
 out:
	return ret;
}

/*
 * Disable a trace_probe:
 * if file is NULL, disable the "perf" handler; otherwise disable the "trace" handler.
 */
static int
disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
	struct event_file_link *link = NULL;
	int wait = 0;
	int ret = 0;

	if (file) {
		link = find_event_file_link(&tk->tp, file);
		if (!link) {
			ret = -EINVAL;
			goto out;
		}

		list_del_rcu(&link->list);
		wait = 1;
		if (!list_empty(&tk->tp.files))
			goto out;

		tk->tp.flags &= ~TP_FLAG_TRACE;
	} else
		tk->tp.flags &= ~TP_FLAG_PROFILE;

	if (!trace_probe_is_enabled(&tk->tp) && trace_probe_is_registered(&tk->tp)) {
		if (trace_kprobe_is_return(tk))
			disable_kretprobe(&tk->rp);
		else
			disable_kprobe(&tk->rp.kp);
		wait = 1;
	}
 out:
	if (wait) {
		/*
		 * Synchronize with kprobe_trace_func/kretprobe_trace_func
		 * to ensure the probe is really disabled (all running
		 * handlers have finished). This matters not only for the
		 * kfree() below: the caller, trace_remove_event_call(),
		 * relies on it when releasing event_call related objects,
		 * which the trace functions still access.
		 */
		synchronize_sched();
		kfree(link);	/* Ignored if link == NULL */
	}

	return ret;
}

/* Internal register function - just handle k*probes and flags */
static int __register_trace_kprobe(struct trace_kprobe *tk)
{
	int i, ret;

	if (trace_probe_is_registered(&tk->tp))
		return -EINVAL;

	for (i = 0; i < tk->tp.nr_args; i++)
		traceprobe_update_arg(&tk->tp.args[i]);

	/* Set/clear disabled flag according to tp->flag */
	if (trace_probe_is_enabled(&tk->tp))
		tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
	else
		tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;

	if (trace_kprobe_is_return(tk))
		ret = register_kretprobe(&tk->rp);
	else
		ret = register_kprobe(&tk->rp.kp);

	if (ret == 0)
		tk->tp.flags |= TP_FLAG_REGISTERED;
	else {
		pr_warn("Could not insert probe at %s+%lu: %d\n",
			trace_kprobe_symbol(tk), trace_kprobe_offset(tk), ret);
		if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) {
			pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
			ret = 0;
		} else if (ret == -EILSEQ) {
			pr_warn("Probing address(0x%p) is not an instruction boundary.\n",
				tk->rp.kp.addr);
			ret = -EINVAL;
		}
	}

	return ret;
}

/* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_kprobe(struct trace_kprobe *tk)
{
	if (trace_probe_is_registered(&tk->tp)) {
		if (trace_kprobe_is_return(tk))
			unregister_kretprobe(&tk->rp);
		else
			unregister_kprobe(&tk->rp.kp);
		tk->tp.flags &= ~TP_FLAG_REGISTERED;
		/* Cleanup kprobe for reuse */
		if (tk->rp.kp.symbol_name)
			tk->rp.kp.addr = NULL;
	}
}

/* Unregister a trace_kprobe and its probe_event: must be called with probe_lock held */
static int unregister_trace_kprobe(struct trace_kprobe *tk)
{
	/* An enabled event cannot be unregistered */
	if (trace_probe_is_enabled(&tk->tp))
		return -EBUSY;

	/* Will fail if probe is being used by ftrace or perf */
	if (unregister_kprobe_event(tk))
		return -EBUSY;

	__unregister_trace_kprobe(tk);
	list_del(&tk->list);

	return 0;
}

/* Register a trace_probe and probe_event */
static int register_trace_kprobe(struct trace_kprobe *tk)
{
	struct trace_kprobe *old_tk;
	int ret;

	mutex_lock(&probe_lock);

	/* Delete an old (same name) event if it exists */
	old_tk = find_trace_kprobe(trace_event_name(&tk->tp.call),
			tk->tp.call.class->system);
	if (old_tk) {
		ret = unregister_trace_kprobe(old_tk);
		if (ret < 0)
			goto end;
		free_trace_kprobe(old_tk);
	}

	/* Register new event */
	ret = register_kprobe_event(tk);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	/* Register k*probe */
	ret = __register_trace_kprobe(tk);
	if (ret < 0)
		unregister_kprobe_event(tk);
	else
		list_add_tail(&tk->list, &probe_list);

end:
	mutex_unlock(&probe_lock);
	return ret;
}

/* Module notifier callback, checking events on the module */
static int trace_kprobe_module_callback(struct notifier_block *nb,
				       unsigned long val, void *data)
{
	struct module *mod = data;
	struct trace_kprobe *tk;
	int ret;

	if (val != MODULE_STATE_COMING)
		return NOTIFY_DONE;

	/* Update probes on coming module */
	mutex_lock(&probe_lock);
	list_for_each_entry(tk, &probe_list, list) {
		if (trace_kprobe_within_module(tk, mod)) {
			/* No need to check busy here - this probe went away with the module. */
			__unregister_trace_kprobe(tk);
			ret = __register_trace_kprobe(tk);
			if (ret)
				pr_warn("Failed to re-register probe %s on %s: %d\n",
					trace_event_name(&tk->tp.call),
					mod->name, ret);
		}
	}
	mutex_unlock(&probe_lock);

	return NOTIFY_DONE;
}

static struct notifier_block trace_kprobe_module_nb = {
	.notifier_call = trace_kprobe_module_callback,
	.priority = 1	/* Invoked after kprobe module callback */
};

/*
 * Convert characters that are invalid in an event name (':' and '.') into
 * '_' when generating event names, e.g. the auto-generated name
 * "p_ext4:ext4_sync_file_0" becomes "p_ext4_ext4_sync_file_0".
 */
static inline void sanitize_event_name(char *name)
{
	while (*name++ != '\0')
		if (*name == ':' || *name == '.')
			*name = '_';
}

static int create_trace_kprobe(int argc, char **argv)
{
	/*
	 * Argument syntax:
	 *  - Add kprobe:
	 *      p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe:
	 *      r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
	 * Fetch args:
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth of stack (N:0-)
	 *  $comm       : fetch current task comm
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG	: fetch register REG
	 * Dereferencing memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 * Type of args:
	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
	 */
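	/*
	 * Example session (illustrative; register names are arch-specific,
	 * see Documentation/trace/kprobetrace.txt), in
	 * /sys/kernel/debug/tracing:
	 *   echo 'p:myprobe do_sys_open dfd=%ax filename=%dx' > kprobe_events
	 *   echo 'r:myretprobe do_sys_open $retval' >> kprobe_events
	 *   echo '-:myprobe' >> kprobe_events
	 */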
	struct trace_kprobe *tk;
	int i, ret = 0;
	bool is_return = false, is_delete = false;
	char *symbol = NULL, *event = NULL, *group = NULL;
	int maxactive = 0;
	char *arg;
	unsigned long offset = 0;
	void *addr = NULL;
	char buf[MAX_EVENT_NAME_LEN];

	/* argc must be >= 1 */
	if (argv[0][0] == 'p')
		is_return = false;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] == '-')
		is_delete = true;
	else {
		pr_info("Probe definition must be started with 'p', 'r' or"
			" '-'.\n");
		return -EINVAL;
	}

	event = strchr(&argv[0][1], ':');
	if (event) {
		event[0] = '\0';
		event++;
	}
	if (is_return && isdigit(argv[0][1])) {
		ret = kstrtouint(&argv[0][1], 0, &maxactive);
		if (ret) {
			pr_info("Failed to parse maxactive.\n");
			return ret;
		}
		/* kretprobe instances are iterated over via a list. The
		 * maximum should stay reasonable.
		 */
		if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
			pr_info("Maxactive is too big (%d > %d).\n",
				maxactive, KRETPROBE_MAXACTIVE_MAX);
			return -E2BIG;
		}
	}

	if (event) {
		if (strchr(event, '/')) {
			group = event;
			event = strchr(group, '/') + 1;
			event[-1] = '\0';
			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = KPROBE_EVENT_SYSTEM;

	if (is_delete) {
		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&probe_lock);
		tk = find_trace_kprobe(event, group);
		if (!tk) {
			mutex_unlock(&probe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_kprobe(tk);
		if (ret == 0)
			free_trace_kprobe(tk);
		mutex_unlock(&probe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}

	/* try to parse an address. if that fails, try to read the
	 * input as a symbol. */
	if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
		/* a symbol specified */
		symbol = argv[1];
		/* TODO: support .init module functions */
		ret = traceprobe_split_symbol_offset(symbol, &offset);
		if (ret) {
			pr_info("Failed to parse either an address or a symbol.\n");
			return ret;
		}
		if (offset && is_return &&
		    !kprobe_on_func_entry(NULL, symbol, offset)) {
			pr_info("Given offset is not valid for return probe.\n");
			return -EINVAL;
		}
	}
	argc -= 2; argv += 2;

	/* setup a probe */
	if (!event) {
		/* Make a new event name */
		if (symbol)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
				 is_return ? 'r' : 'p', symbol, offset);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
				 is_return ? 'r' : 'p', addr);
		sanitize_event_name(buf);
		event = buf;
	}
	tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
			       argc, is_return);
	if (IS_ERR(tk)) {
		pr_info("Failed to allocate trace_probe.(%d)\n",
			(int)PTR_ERR(tk));
		return PTR_ERR(tk);
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		/* Increment count for freeing args in error case */
		tk->tp.nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			parg->name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			parg->name = kstrdup(buf, GFP_KERNEL);
		}

		if (!parg->name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(parg->name)) {
			pr_info("Invalid argument[%d] name: %s\n",
				i, parg->name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(parg->name,
							tk->tp.args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg,
						is_return, true,
						kprobes_fetch_type_table);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_kprobe(tk);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_kprobe(tk);
	return ret;
}

static int release_all_trace_kprobes(void)
{
	struct trace_kprobe *tk;
	int ret = 0;

	mutex_lock(&probe_lock);
	/* Ensure no probe is in use. */
	list_for_each_entry(tk, &probe_list, list)
		if (trace_probe_is_enabled(&tk->tp)) {
			ret = -EBUSY;
			goto end;
		}
	/* TODO: Use batch unregistration */
	while (!list_empty(&probe_list)) {
		tk = list_entry(probe_list.next, struct trace_kprobe, list);
		ret = unregister_trace_kprobe(tk);
		if (ret)
			goto end;
		free_trace_kprobe(tk);
	}

end:
	mutex_unlock(&probe_lock);

	return ret;
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&probe_lock);
	return seq_list_start(&probe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &probe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&probe_lock);
}

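/*
 * Show one probe definition per line, mirroring the command syntax, e.g.:
 *   p:kprobes/myprobe do_sys_open dfd=%ax filename=%dx
 */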
static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_kprobe *tk = v;
	int i;

	seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
	seq_printf(m, ":%s/%s", tk->tp.call.class->system,
			trace_event_name(&tk->tp.call));

	if (!tk->symbol)
		seq_printf(m, " 0x%p", tk->rp.kp.addr);
	else if (tk->rp.kp.offset)
		seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
			   tk->rp.kp.offset);
	else
		seq_printf(m, " %s", trace_kprobe_symbol(tk));

	for (i = 0; i < tk->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
	seq_putc(m, '\n');

	return 0;
}

static const struct seq_operations probes_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = release_all_trace_kprobes();
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return traceprobe_probes_write(file, buffer, count, ppos,
			create_trace_kprobe);
}

static const struct file_operations kprobe_events_ops = {
	.owner          = THIS_MODULE,
	.open           = probes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
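/* One line per probe: event name, number of hits, number of misses. */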
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_kprobe *tk = v;

	seq_printf(m, "  %-44s %15lu %15lu\n",
		   trace_event_name(&tk->tp.call),
		   trace_kprobe_nhit(tk),
		   tk->rp.kp.nmissed);

	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations kprobe_profile_ops = {
	.owner          = THIS_MODULE,
	.open           = profile_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

/* Kprobe handler */
static nokprobe_inline void
__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
		    struct trace_event_file *trace_file)
{
	struct kprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, dsize, pc;
	unsigned long irq_flags;
	struct trace_event_call *call = &tk->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (trace_trigger_soft_disabled(trace_file))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(&tk->tp, regs);
	size = sizeof(*entry) + tk->tp.size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->ip = (unsigned long)tk->rp.kp.addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);

	event_trigger_unlock_commit_regs(trace_file, buffer, event,
					 entry, irq_flags, pc, regs);
}

static void
kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct event_file_link *link;

	list_for_each_entry_rcu(link, &tk->tp.files, list)
		__kprobe_trace_func(tk, regs, link->file);
}
NOKPROBE_SYMBOL(kprobe_trace_func);

/* Kretprobe handler */
static nokprobe_inline void
__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		       struct pt_regs *regs,
		       struct trace_event_file *trace_file)
{
	struct kretprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, pc, dsize;
	unsigned long irq_flags;
	struct trace_event_call *call = &tk->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (trace_trigger_soft_disabled(trace_file))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(&tk->tp, regs);
	size = sizeof(*entry) + tk->tp.size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);

	event_trigger_unlock_commit_regs(trace_file, buffer, event,
					 entry, irq_flags, pc, regs);
}

static void
kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		     struct pt_regs *regs)
{
	struct event_file_link *link;

	list_for_each_entry_rcu(link, &tk->tp.files, list)
		__kretprobe_trace_func(tk, ri, regs, link->file);
}
NOKPROBE_SYMBOL(kretprobe_trace_func);

/* Event entry printers */
static enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct kprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto out;

	trace_seq_putc(s, '\n');
 out:
	return trace_handle_return(s);
}

static enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags,
		      struct trace_event *event)
{
	struct kretprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kretprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_puts(s, " <- ");

	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto out;

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}


static int kprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, i;
	struct kprobe_trace_entry_head field;
	struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tk->tp.nr_args; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name,
					 sizeof(field) + parg->offset,
					 parg->type->size,
					 parg->type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

static int kretprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, i;
	struct kretprobe_trace_entry_head field;
	struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tk->tp.nr_args; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name,
					 sizeof(field) + parg->offset,
					 parg->type->size,
					 parg->type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

#ifdef CONFIG_PERF_EVENTS

/* Kprobe profile handler */
static void
kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct trace_event_call *call = &tk->tp.call;
	struct bpf_prog *prog = call->prog;
	struct kprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	if (prog && !trace_call_bpf(prog, regs))
		return;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
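	/*
	 * perf stores raw sample records behind a u32 size header; round up
	 * so that header plus payload stays u64-aligned, then drop the
	 * header size again.
	 */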
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return;

	entry->ip = (unsigned long)tk->rp.kp.addr;
	memset(&entry[1], 0, dsize);
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL, NULL);
}
NOKPROBE_SYMBOL(kprobe_perf_func);

/* Kretprobe profile handler */
static void
kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		    struct pt_regs *regs)
{
	struct trace_event_call *call = &tk->tp.call;
	struct bpf_prog *prog = call->prog;
	struct kretprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	if (prog && !trace_call_bpf(prog, regs))
		return;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return;

	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL, NULL);
}
NOKPROBE_SYMBOL(kretprobe_perf_func);
#endif	/* CONFIG_PERF_EVENTS */

/*
 * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
 *
 * kprobe_trace_self_tests_init() calls enable_trace_kprobe/disable_trace_kprobe
 * locklessly, but it cannot race with this __init function.
 */
static int kprobe_register(struct trace_event_call *event,
			   enum trace_reg type, void *data)
{
	struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return enable_trace_kprobe(tk, file);
	case TRACE_REG_UNREGISTER:
		return disable_trace_kprobe(tk, file);

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return enable_trace_kprobe(tk, NULL);
	case TRACE_REG_PERF_UNREGISTER:
		return disable_trace_kprobe(tk, NULL);
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);

	raw_cpu_inc(*tk->nhit);

	if (tk->tp.flags & TP_FLAG_TRACE)
		kprobe_trace_func(tk, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tk->tp.flags & TP_FLAG_PROFILE)
		kprobe_perf_func(tk, regs);
#endif
	return 0;	/* We don't tweak the kernel, so just return 0 */
}
NOKPROBE_SYMBOL(kprobe_dispatcher);

static int
kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);

	raw_cpu_inc(*tk->nhit);

	if (tk->tp.flags & TP_FLAG_TRACE)
		kretprobe_trace_func(tk, ri, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tk->tp.flags & TP_FLAG_PROFILE)
		kretprobe_perf_func(tk, ri, regs);
#endif
	return 0;	/* We don't tweak the kernel, so just return 0 */
}
NOKPROBE_SYMBOL(kretprobe_dispatcher);

static struct trace_event_functions kretprobe_funcs = {
	.trace		= print_kretprobe_event
};

static struct trace_event_functions kprobe_funcs = {
	.trace		= print_kprobe_event
};

static int register_kprobe_event(struct trace_kprobe *tk)
{
	struct trace_event_call *call = &tk->tp.call;
	int ret;

	/* Initialize trace_event_call */
	INIT_LIST_HEAD(&call->class->fields);
	if (trace_kprobe_is_return(tk)) {
		call->event.funcs = &kretprobe_funcs;
		call->class->define_fields = kretprobe_event_define_fields;
	} else {
		call->event.funcs = &kprobe_funcs;
		call->class->define_fields = kprobe_event_define_fields;
	}
	if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0)
		return -ENOMEM;
	ret = register_trace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}
	call->flags = TRACE_EVENT_FL_KPROBE;
	call->class->reg = kprobe_register;
	call->data = tk;
	ret = trace_add_event_call(call);
	if (ret) {
		pr_info("Failed to register kprobe event: %s\n",
			trace_event_name(call));
		kfree(call->print_fmt);
		unregister_trace_event(&call->event);
	}
	return ret;
}

static int unregister_kprobe_event(struct trace_kprobe *tk)
{
	int ret;

	/* tp->event is unregistered in trace_remove_event_call() */
	ret = trace_remove_event_call(&tk->tp.call);
	if (!ret)
		kfree(tk->tp.call.print_fmt);
	return ret;
}

/* Make a tracefs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	if (register_module_notifier(&trace_kprobe_module_nb))
		return -EINVAL;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	entry = tracefs_create_file("kprobe_events", 0644, d_tracer,
				    NULL, &kprobe_events_ops);

	/* Event list interface */
	if (!entry)
		pr_warn("Could not create tracefs 'kprobe_events' entry\n");

	/* Profile interface */
	entry = tracefs_create_file("kprobe_profile", 0444, d_tracer,
				    NULL, &kprobe_profile_ops);

	if (!entry)
		pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
	return 0;
}
fs_initcall(init_kprobe_trace);


#ifdef CONFIG_FTRACE_STARTUP_TEST
/*
 * The "__used" keeps gcc from removing the function symbol
 * from the kallsyms table. 'noinline' makes sure that there
 * isn't an inlined version used by the test method below
 */
static __used __init noinline int
kprobe_trace_selftest_target(int a1, int a2, int a3, int a4, int a5, int a6)
{
	return a1 + a2 + a3 + a4 + a5 + a6;
}

static __init struct trace_event_file *
find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
{
	struct trace_event_file *file;

	list_for_each_entry(file, &tr->events, list)
		if (file->event_call == &tk->tp.call)
			return file;

	return NULL;
}

/*
 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
 * stage, so we can do this locklessly.
 */
static __init int kprobe_trace_self_tests_init(void)
{
	int ret, warn = 0;
	int (*target)(int, int, int, int, int, int);
	struct trace_kprobe *tk;
	struct trace_event_file *file;

	if (tracing_is_disabled())
		return -ENODEV;

	target = kprobe_trace_selftest_target;

	pr_info("Testing kprobe tracing: ");

	ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target "
				  "$stack $stack0 +0($stack)",
				  create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function entry.\n");
		warn++;
	} else {
		/* Enable trace point */
		tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tk == NULL)) {
			pr_warn("error on getting new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tk, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_kprobe(tk, file);
		}
	}

	ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
				  "$retval", create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function return.\n");
		warn++;
	} else {
		/* Enable trace point */
		tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tk == NULL)) {
			pr_warn("error on getting 2nd new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tk, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_kprobe(tk, file);
		}
	}

	if (warn)
		goto end;

	ret = target(1, 2, 3, 4, 5, 6);

	/*
	 * Not expecting an error here, the check is only to prevent the
	 * optimizer from removing the call to target() as otherwise there
	 * are no side-effects and the call is never performed.
	 */
	if (ret != 21)
		warn++;

	/* Disable trace points before removing it */
	tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tk == NULL)) {
		pr_warn("error on getting test probe.\n");
		warn++;
	} else {
		if (trace_kprobe_nhit(tk) != 1) {
			pr_warn("incorrect number of testprobe hits\n");
			warn++;
		}

		file = find_trace_probe_file(tk, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_kprobe(tk, file);
	}

	tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tk == NULL)) {
		pr_warn("error on getting 2nd test probe.\n");
		warn++;
	} else {
		if (trace_kprobe_nhit(tk) != 1) {
			pr_warn("incorrect number of testprobe2 hits\n");
			warn++;
		}

		file = find_trace_probe_file(tk, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_kprobe(tk, file);
	}

	ret = traceprobe_command("-:testprobe", create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

	ret = traceprobe_command("-:testprobe2", create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

end:
	release_all_trace_kprobes();
	/*
	 * Wait for the optimizer work to finish. Otherwise it might fiddle
	 * with probes in already freed __init text.
	 */
	wait_for_kprobe_optimizer();
	if (warn)
		pr_cont("NG: Some tests are failed. Please check them.\n");
	else
		pr_cont("OK\n");
	return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif