trace_kprobe.c 31.1 KB
Newer Older
1
/*
2
 * Kprobes-based tracing events
3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/module.h>
#include <linux/uaccess.h>

23
#include "trace_probe.h"
24

25
#define KPROBE_EVENT_SYSTEM "kprobes"
26

27
/**
28
 * Kprobe event core functions
29 30 31 32
 */

/*
 * Per-event probe definition: ties one kprobe or kretprobe to one
 * ftrace event.  For kprobes only rp.kp is used; for kretprobes the
 * whole kretprobe is used (rp.handler != NULL marks the return case).
 */
struct trace_probe {
	struct list_head	list;		/* link in probe_list, under probe_lock */
	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
	unsigned long 		nhit;		/* hit counter, shown in kprobe_profile */
	unsigned int		flags;	/* For TP_FLAG_* */
	const char		*symbol;	/* symbol name */
	struct ftrace_event_class	class;
	struct ftrace_event_call	call;
	ssize_t			size;		/* trace entry size */
	unsigned int		nr_args;	/* number of entries in args[] */
	struct probe_arg	args[];		/* flexible array of fetch args */
};

/* Allocation size for a trace_probe carrying n fetch arguments */
#define SIZEOF_TRACE_PROBE(n)			\
	(offsetof(struct trace_probe, args) +	\
	(sizeof(struct probe_arg) * (n)))
47

48

49
/* True if this probe is a kretprobe (a return handler was installed). */
static __kprobes bool trace_probe_is_return(struct trace_probe *tp)
{
	return tp->rp.handler != NULL;
}

/* Probed symbol name, or "unknown" for address-based probes. */
static __kprobes const char *trace_probe_symbol(struct trace_probe *tp)
{
	return tp->symbol ? tp->symbol : "unknown";
}

/* Offset from the probed symbol. */
static __kprobes unsigned long trace_probe_offset(struct trace_probe *tp)
{
	return tp->rp.kp.offset;
}

/* True if the probe is enabled for ftrace and/or perf. */
static __kprobes bool trace_probe_is_enabled(struct trace_probe *tp)
{
	return !!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE));
}

/* True if the underlying k*probe has been registered with the kernel. */
static __kprobes bool trace_probe_is_registered(struct trace_probe *tp)
{
	return !!(tp->flags & TP_FLAG_REGISTERED);
}

/* True if the probed code has gone away (e.g. its module was unloaded). */
static __kprobes bool trace_probe_has_gone(struct trace_probe *tp)
{
	return !!(kprobe_gone(&tp->rp.kp));
}

/* True if the probe's "MOD:SYM" symbol names the given module. */
static __kprobes bool trace_probe_within_module(struct trace_probe *tp,
						struct module *mod)
{
	int len = strlen(mod->name);
	const char *name = trace_probe_symbol(tp);
	return strncmp(mod->name, name, len) == 0 && name[len] == ':';
}

/* True if the probe symbol carries a "MOD:" module prefix. */
static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp)
{
	return !!strchr(trace_probe_symbol(tp), ':');
}

92 93 94 95 96 97
static int register_probe_event(struct trace_probe *tp);
static void unregister_probe_event(struct trace_probe *tp);

static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);

98 99 100 101
static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
				struct pt_regs *regs);

102 103 104
/*
 * Allocate new trace_probe and initialize it (including kprobes).
 *
 * Either @symbol (+ @offs) or @addr locates the probe point; @symbol is
 * duplicated so the caller's buffer need not outlive the probe.  @event
 * and @group name the ftrace event and must pass is_good_name().
 * Returns the new probe or an ERR_PTR; on error all partial allocations
 * are freed.
 */
static struct trace_probe *alloc_trace_probe(const char *group,
					     const char *event,
					     void *addr,
					     const char *symbol,
					     unsigned long offs,
					     int nargs, bool is_return)
{
	struct trace_probe *tp;
	int ret = -ENOMEM;

	tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(ret);

	if (symbol) {
		tp->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tp->symbol)
			goto error;
		tp->rp.kp.symbol_name = tp->symbol;
		tp->rp.kp.offset = offs;
	} else
		tp->rp.kp.addr = addr;

	/* The installed handler kind decides kprobe vs. kretprobe later */
	if (is_return)
		tp->rp.handler = kretprobe_dispatcher;
	else
		tp->rp.kp.pre_handler = kprobe_dispatcher;

	if (!event || !is_good_name(event)) {
		ret = -EINVAL;
		goto error;
	}

	tp->call.class = &tp->class;
	tp->call.name = kstrdup(event, GFP_KERNEL);
	if (!tp->call.name)
		goto error;

	if (!group || !is_good_name(group)) {
		ret = -EINVAL;
		goto error;
	}

	tp->class.system = kstrdup(group, GFP_KERNEL);
	if (!tp->class.system)
		goto error;

	INIT_LIST_HEAD(&tp->list);
	return tp;
error:
	/* kfree(NULL) is a no-op, so unset members are safe to free */
	kfree(tp->call.name);
	kfree(tp->symbol);
	kfree(tp);
	return ERR_PTR(ret);
}

/* Free a trace_probe and everything it owns (args, names, symbol). */
static void free_trace_probe(struct trace_probe *tp)
{
	int i;

	for (i = 0; i < tp->nr_args; i++)
		traceprobe_free_probe_arg(&tp->args[i]);

	kfree(tp->call.class->system);
	kfree(tp->call.name);
	kfree(tp->symbol);
	kfree(tp);
}

174
/*
 * Look up a probe by event and group name.  Returns NULL if not found.
 * Caller must hold probe_lock to keep the returned pointer valid.
 */
static struct trace_probe *find_trace_probe(const char *event,
					    const char *group)
{
	struct trace_probe *tp;

	list_for_each_entry(tp, &probe_list, list)
		if (strcmp(tp->call.name, event) == 0 &&
		    strcmp(tp->call.class->system, group) == 0)
			return tp;
	return NULL;
}

186 187 188 189 190 191
/* Enable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */
static int enable_trace_probe(struct trace_probe *tp, int flag)
{
	int ret = 0;

	tp->flags |= flag;
	/* Only arm the k*probe when it is registered and still alive */
	if (trace_probe_is_enabled(tp) && trace_probe_is_registered(tp) &&
	    !trace_probe_has_gone(tp)) {
		if (trace_probe_is_return(tp))
			ret = enable_kretprobe(&tp->rp);
		else
			ret = enable_kprobe(&tp->rp.kp);
	}

	return ret;
}

/* Disable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */
static void disable_trace_probe(struct trace_probe *tp, int flag)
{
	tp->flags &= ~flag;
	/* Disarm only when neither trace nor profile still needs the probe */
	if (!trace_probe_is_enabled(tp) && trace_probe_is_registered(tp)) {
		if (trace_probe_is_return(tp))
			disable_kretprobe(&tp->rp);
		else
			disable_kprobe(&tp->rp.kp);
	}
}

215 216
/* Internal register function - just handle k*probes and flags */
static int __register_trace_probe(struct trace_probe *tp)
217
{
218
	int i, ret;
219 220 221 222

	if (trace_probe_is_registered(tp))
		return -EINVAL;

223
	for (i = 0; i < tp->nr_args; i++)
224
		traceprobe_update_arg(&tp->args[i]);
225

226 227 228 229 230 231
	/* Set/clear disabled flag according to tp->flag */
	if (trace_probe_is_enabled(tp))
		tp->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
	else
		tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;

232
	if (trace_probe_is_return(tp))
233
		ret = register_kretprobe(&tp->rp);
234
	else
235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272
		ret = register_kprobe(&tp->rp.kp);

	if (ret == 0)
		tp->flags |= TP_FLAG_REGISTERED;
	else {
		pr_warning("Could not insert probe at %s+%lu: %d\n",
			   trace_probe_symbol(tp), trace_probe_offset(tp), ret);
		if (ret == -ENOENT && trace_probe_is_on_module(tp)) {
			pr_warning("This probe might be able to register after"
				   "target module is loaded. Continue.\n");
			ret = 0;
		} else if (ret == -EILSEQ) {
			pr_warning("Probing address(0x%p) is not an "
				   "instruction boundary.\n",
				   tp->rp.kp.addr);
			ret = -EINVAL;
		}
	}

	return ret;
}

/* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_probe(struct trace_probe *tp)
{
	if (trace_probe_is_registered(tp)) {
		if (trace_probe_is_return(tp))
			unregister_kretprobe(&tp->rp);
		else
			unregister_kprobe(&tp->rp.kp);
		tp->flags &= ~TP_FLAG_REGISTERED;
		/* Cleanup kprobe for reuse */
		if (tp->rp.kp.symbol_name)
			tp->rp.kp.addr = NULL;	/* re-resolve from symbol on re-register */
	}
}

/* Unregister a trace_probe and probe_event: call with locking probe_lock */
static int unregister_trace_probe(struct trace_probe *tp)
{
	/* Enabled event can not be unregistered */
	if (trace_probe_is_enabled(tp))
		return -EBUSY;

	__unregister_trace_probe(tp);
	list_del(&tp->list);
	unregister_probe_event(tp);

	return 0;
}

/*
 * Register a trace_probe and probe_event.
 *
 * Takes probe_lock.  An existing probe with the same group/event name is
 * unregistered and freed first (fails with -EBUSY if it is enabled).
 * The ftrace event is registered before the k*probe; if the k*probe
 * registration fails the event is rolled back.
 */
static int register_trace_probe(struct trace_probe *tp)
{
	struct trace_probe *old_tp;
	int ret;

	mutex_lock(&probe_lock);

	/* Delete old (same name) event if exist */
	old_tp = find_trace_probe(tp->call.name, tp->call.class->system);
	if (old_tp) {
		ret = unregister_trace_probe(old_tp);
		if (ret < 0)
			goto end;
		free_trace_probe(old_tp);
	}

	/* Register new event */
	ret = register_probe_event(tp);
	if (ret) {
		pr_warning("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	/* Register k*probe */
	ret = __register_trace_probe(tp);
	if (ret < 0)
		unregister_probe_event(tp);
	else
		list_add_tail(&tp->list, &probe_list);

end:
	mutex_unlock(&probe_lock);
	return ret;
}

322 323 324 325 326 327 328 329 330 331 332 333 334 335 336
/*
 * Module notifier call back, checking event on the module.
 *
 * When a module comes up, re-register every probe whose "MOD:SYM"
 * symbol lives in that module so the probe actually arms.
 */
static int trace_probe_module_callback(struct notifier_block *nb,
				       unsigned long val, void *data)
{
	struct module *mod = data;
	struct trace_probe *tp;
	int ret;

	if (val != MODULE_STATE_COMING)
		return NOTIFY_DONE;

	/* Update probes on coming module */
	mutex_lock(&probe_lock);
	list_for_each_entry(tp, &probe_list, list) {
		if (trace_probe_within_module(tp, mod)) {
			/* Don't need to check busy - this should have gone. */
			__unregister_trace_probe(tp);
			ret = __register_trace_probe(tp);
			if (ret)
				/*
				 * Fixed: the split string literals previously
				 * concatenated to "probe %s on%s" - a space
				 * was missing at the join.
				 */
				pr_warning("Failed to re-register probe %s on"
					   " %s: %d\n",
					   tp->call.name, mod->name, ret);
		}
	}
	mutex_unlock(&probe_lock);

	return NOTIFY_DONE;
}

static struct notifier_block trace_probe_module_nb = {
	.notifier_call = trace_probe_module_callback,
	.priority = 1	/* Invoked after kprobe module callback */
};

356 357 358 359
/*
 * Parse one kprobe_events command line (already split into argv) and
 * create, or with '-' delete, the corresponding trace_probe.
 * Returns 0 on success or a negative errno.
 */
static int create_trace_probe(int argc, char **argv)
{
	/*
	 * Argument syntax:
	 *  - Add kprobe: p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe: r[:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
	 * Fetch args:
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth of stack (N:0-)
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG	: fetch register REG
	 * Dereferencing memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 * Type of args:
	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
	 */
	struct trace_probe *tp;
	int i, ret = 0;
	bool is_return = false, is_delete = false;
	char *symbol = NULL, *event = NULL, *group = NULL;
	char *arg;
	unsigned long offset = 0;
	void *addr = NULL;
	char buf[MAX_EVENT_NAME_LEN];

	/* argc must be >= 1 */
	if (argv[0][0] == 'p')
		is_return = false;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] == '-')
		is_delete = true;
	else {
		pr_info("Probe definition must be started with 'p', 'r' or"
			" '-'.\n");
		return -EINVAL;
	}

	/* Optional ":GRP/EVENT" suffix on the command letter */
	if (argv[0][1] == ':') {
		event = &argv[0][2];
		if (strchr(event, '/')) {
			group = event;
			event = strchr(group, '/') + 1;
			event[-1] = '\0';	/* split GRP from EVENT in place */
			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = KPROBE_EVENT_SYSTEM;

	if (is_delete) {
		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&probe_lock);
		tp = find_trace_probe(event, group);
		if (!tp) {
			mutex_unlock(&probe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_probe(tp);
		if (ret == 0)
			free_trace_probe(tp);
		mutex_unlock(&probe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	if (isdigit(argv[1][0])) {
		if (is_return) {
			pr_info("Return probe point must be a symbol.\n");
			return -EINVAL;
		}
		/* an address specified */
		ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
		if (ret) {
			pr_info("Failed to parse address.\n");
			return ret;
		}
	} else {
		/* a symbol specified */
		symbol = argv[1];
		/* TODO: support .init module functions */
		ret = traceprobe_split_symbol_offset(symbol, &offset);
		if (ret) {
			pr_info("Failed to parse symbol.\n");
			return ret;
		}
		if (offset && is_return) {
			pr_info("Return probe must be used without offset.\n");
			return -EINVAL;
		}
	}
	argc -= 2; argv += 2;	/* remaining argv[] are fetch args */

	/* setup a probe */
	if (!event) {
		/* Make a new event name */
		if (symbol)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
				 is_return ? 'r' : 'p', symbol, offset);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
				 is_return ? 'r' : 'p', addr);
		event = buf;
	}
	tp = alloc_trace_probe(group, event, addr, symbol, offset, argc,
			       is_return);
	if (IS_ERR(tp)) {
		pr_info("Failed to allocate trace_probe.(%d)\n",
			(int)PTR_ERR(tp));
		return PTR_ERR(tp);
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		/* Increment count for freeing args in error case */
		tp->nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			tp->args[i].name = kstrdup(buf, GFP_KERNEL);
		}

		if (!tp->args[i].name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(tp->args[i].name)) {
			pr_info("Invalid argument[%d] name: %s\n",
				i, tp->args[i].name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(tp->args[i].name,
							tp->args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tp->size, &tp->args[i],
						is_return, true);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_probe(tp);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_probe(tp);
	return ret;
}

545
/*
 * Remove and free every probe (used when kprobe_events is opened with
 * O_TRUNC).  Fails with -EBUSY if any probe is still enabled.
 */
static int release_all_trace_probes(void)
{
	struct trace_probe *tp;
	int ret = 0;

	mutex_lock(&probe_lock);
	/* Ensure no probe is in use. */
	list_for_each_entry(tp, &probe_list, list)
		if (trace_probe_is_enabled(tp)) {
			ret = -EBUSY;
			goto end;
		}
	/* TODO: Use batch unregistration */
	while (!list_empty(&probe_list)) {
		tp = list_entry(probe_list.next, struct trace_probe, list);
		unregister_trace_probe(tp);
		free_trace_probe(tp);
	}

end:
	mutex_unlock(&probe_lock);

	return ret;
}

/* Probes listing interfaces */

/* seq_file start: hold probe_lock for the whole iteration */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&probe_lock);
	return seq_list_start(&probe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &probe_list, pos);
}

/* seq_file stop: drop the lock taken in probes_seq_start() */
static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&probe_lock);
}

/* Print one probe definition in the same syntax used to create it */
static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_probe *tp = v;
	int i;

	seq_printf(m, "%c", trace_probe_is_return(tp) ? 'r' : 'p');
	seq_printf(m, ":%s/%s", tp->call.class->system, tp->call.name);

	if (!tp->symbol)
		seq_printf(m, " 0x%p", tp->rp.kp.addr);
	else if (tp->rp.kp.offset)
		seq_printf(m, " %s+%u", trace_probe_symbol(tp),
			   tp->rp.kp.offset);
	else
		seq_printf(m, " %s", trace_probe_symbol(tp));

	for (i = 0; i < tp->nr_args; i++)
		seq_printf(m, " %s=%s", tp->args[i].name, tp->args[i].comm);
	seq_printf(m, "\n");

	return 0;
}

static const struct seq_operations probes_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_seq_show
};

/*
 * open() for kprobe_events: opening for write with O_TRUNC clears all
 * probes first (fails if any probe is enabled).
 */
static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = release_all_trace_probes();
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

/* write() for kprobe_events: each line is parsed by create_trace_probe() */
static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return traceprobe_probes_write(file, buffer, count, ppos,
			create_trace_probe);
}

static const struct file_operations kprobe_events_ops = {
	.owner          = THIS_MODULE,
	.open           = probes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
	.write		= probes_write,
};

646 647 648 649 650 651
/* Probes profiling interfaces */

/* Print one "event-name  nhit  nmissed" line for kprobe_profile */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_probe *tp = v;

	seq_printf(m, "  %-44s %15lu %15lu\n", tp->call.name, tp->nhit,
		   tp->rp.kp.nmissed);

	return 0;
}

/* Reuses the listing iterators; only the show callback differs */
static const struct seq_operations profile_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations kprobe_profile_ops = {
	.owner          = THIS_MODULE,
	.open           = profile_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724
/* Sum up total data length for dynamic arraies (strings) */
static __kprobes int __get_data_size(struct trace_probe *tp,
				     struct pt_regs *regs)
{
	int i, ret = 0;
	u32 len;

	/* Only args with a fetch_size fn carry dynamic (variable) data */
	for (i = 0; i < tp->nr_args; i++)
		if (unlikely(tp->args[i].fetch_size.fn)) {
			call_fetch(&tp->args[i].fetch_size, regs, &len);
			ret += len;
		}

	return ret;
}

/* Store the value of each argument */
static __kprobes void store_trace_args(int ent_size, struct trace_probe *tp,
				       struct pt_regs *regs,
				       u8 *data, int maxlen)
{
	int i;
	u32 end = tp->size;	/* dynamic data starts after fixed-size args */
	u32 *dl;	/* Data (relative) location */

	for (i = 0; i < tp->nr_args; i++) {
		if (unlikely(tp->args[i].fetch_size.fn)) {
			/*
			 * First, we set the relative location and
			 * maximum data length to *dl
			 */
			dl = (u32 *)(data + tp->args[i].offset);
			*dl = make_data_rloc(maxlen, end - tp->args[i].offset);
			/* Then try to fetch string or dynamic array data */
			call_fetch(&tp->args[i].fetch, regs, dl);
			/* Reduce maximum length */
			end += get_rloc_len(*dl);
			maxlen -= get_rloc_len(*dl);
			/* Trick here, convert data_rloc to data_loc */
			*dl = convert_rloc_to_loc(*dl,
				 ent_size + tp->args[i].offset);
		} else
			/* Just fetching data normally */
			call_fetch(&tp->args[i].fetch, regs,
				   data + tp->args[i].offset);
	}
}

725
/*
 * Kprobe handler: record one hit into the ftrace ring buffer
 * (probe address followed by all fetched argument values).
 */
static __kprobes void
kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs)
{
	struct kprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, dsize, pc;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	local_save_flags(irq_flags);
	pc = preempt_count();

	/* entry = header + fixed-size args + dynamic (string) data */
	dsize = __get_data_size(tp, regs);
	size = sizeof(*entry) + tp->size + dsize;

	event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
						  size, irq_flags, pc);
	if (!event)
		return;	/* buffer full or tracing off; silently drop */

	entry = ring_buffer_event_data(event);
	entry->ip = (unsigned long)tp->rp.kp.addr;
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_buffer_unlock_commit_regs(buffer, event,
						irq_flags, pc, regs);
}

/*
 * Kretprobe handler: like kprobe_trace_func() but records both the
 * probed function address and the return address.
 */
static __kprobes void
kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
		     struct pt_regs *regs)
{
	struct kretprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, pc, dsize;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	local_save_flags(irq_flags);
	pc = preempt_count();

	/* entry = header + fixed-size args + dynamic (string) data */
	dsize = __get_data_size(tp, regs);
	size = sizeof(*entry) + tp->size + dsize;

	event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
						  size, irq_flags, pc);
	if (!event)
		return;	/* buffer full or tracing off; silently drop */

	entry = ring_buffer_event_data(event);
	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_buffer_unlock_commit_regs(buffer, event,
						irq_flags, pc, regs);
}

/* Event entry printers */

/* Format a kprobe entry as "EVENT: (ip) arg1=... arg2=..." */
enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct kprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	/* argument values follow the fixed header */
	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	/* seq buffer overflow - caller retries with a bigger buffer */
	return TRACE_TYPE_PARTIAL_LINE;
}

/* Format a kretprobe entry as "EVENT: (ret_ip <- func) arg1=..." */
enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags,
		      struct trace_event *event)
{
	struct kretprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kretprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, " <- "))
		goto partial;

	/* function address is printed without the +offset suffix */
	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	/* argument values follow the fixed header */
	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	/* seq buffer overflow - caller retries with a bigger buffer */
	return TRACE_TYPE_PARTIAL_LINE;
}


/* Define the ftrace event fields for a kprobe event: ip + each arg */
static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kprobe_trace_entry_head field;
	struct trace_probe *tp = (struct trace_probe *)event_call->data;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tp->nr_args; i++) {
		ret = trace_define_field(event_call, tp->args[i].type->fmttype,
					 tp->args[i].name,
					 sizeof(field) + tp->args[i].offset,
					 tp->args[i].type->size,
					 tp->args[i].type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

/* Define the ftrace event fields for a kretprobe event: func, ret_ip + args */
static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kretprobe_trace_entry_head field;
	struct trace_probe *tp = (struct trace_probe *)event_call->data;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tp->nr_args; i++) {
		ret = trace_define_field(event_call, tp->args[i].type->fmttype,
					 tp->args[i].name,
					 sizeof(field) + tp->args[i].offset,
					 tp->args[i].type->size,
					 tp->args[i].type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

912 913 914 915 916 917 918
/*
 * Build the event's print_fmt string into @buf (at most @len bytes).
 * Returns the length needed; called with len=0 first to size the buffer.
 */
static int __set_print_fmt(struct trace_probe *tp, char *buf, int len)
{
	int i;
	int pos = 0;

	const char *fmt, *arg;

	if (!trace_probe_is_return(tp)) {
		fmt = "(%lx)";
		arg = "REC->" FIELD_STRING_IP;
	} else {
		fmt = "(%lx <- %lx)";
		arg = "REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP;
	}

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt);

	for (i = 0; i < tp->nr_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%s",
				tp->args[i].name, tp->args[i].type->fmt);
	}

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg);

	for (i = 0; i < tp->nr_args; i++) {
		if (strcmp(tp->args[i].type->name, "string") == 0)
			pos += snprintf(buf + pos, LEN_OR_ZERO,
					", __get_str(%s)",
					tp->args[i].name);
		else
			pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s",
					tp->args[i].name);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}

/* Allocate and install tp->call.print_fmt (two-pass: size, then fill). */
static int set_print_fmt(struct trace_probe *tp)
{
	int len;
	char *print_fmt;

	/* First: called with 0 length to calculate the needed length */
	len = __set_print_fmt(tp, NULL, 0);
	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_print_fmt(tp, print_fmt, len + 1);
	tp->call.print_fmt = print_fmt;

	return 0;
}

973
#ifdef CONFIG_PERF_EVENTS
974 975

/*
 * Kprobe profile handler: record one hit into the perf buffer.
 */
static __kprobes void
kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs)
{
	struct ftrace_event_call *call = &tp->call;
	struct kprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	dsize = __get_data_size(tp, regs);
	__size = sizeof(*entry) + tp->size + dsize;
	/* perf requires u64 alignment; the u32 is perf's size prefix */
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		     "profile buffer not large enough"))
		return;

	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
	if (!entry)
		return;

	entry->ip = (unsigned long)tp->rp.kp.addr;
	/* zero the dynamic area so alignment padding never leaks data */
	memset(&entry[1], 0, dsize);
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

	head = this_cpu_ptr(call->perf_events);
	perf_trace_buf_submit(entry, size, rctx,
					entry->ip, 1, regs, head, NULL);
}

/*
 * Kretprobe profile handler: like kprobe_perf_func() but records the
 * probed function and return addresses.
 */
static __kprobes void
kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri,
		    struct pt_regs *regs)
{
	struct ftrace_event_call *call = &tp->call;
	struct kretprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	dsize = __get_data_size(tp, regs);
	__size = sizeof(*entry) + tp->size + dsize;
	/* perf requires u64 alignment; the u32 is perf's size prefix */
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		     "profile buffer not large enough"))
		return;

	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
	if (!entry)
		return;

	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

	head = this_cpu_ptr(call->perf_events);
	perf_trace_buf_submit(entry, size, rctx,
					entry->ret_ip, 1, regs, head, NULL);
}
1037
#endif	/* CONFIG_PERF_EVENTS */
1038

1039
/*
 * ftrace event class reg() callback: enable/disable the probe for
 * ftrace (TRACE_REG_*) or perf (TRACE_REG_PERF_*) consumers.
 */
static __kprobes
int kprobe_register(struct ftrace_event_call *event,
		    enum trace_reg type, void *data)
{
	struct trace_probe *tp = (struct trace_probe *)event->data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return enable_trace_probe(tp, TP_FLAG_TRACE);
	case TRACE_REG_UNREGISTER:
		disable_trace_probe(tp, TP_FLAG_TRACE);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return enable_trace_probe(tp, TP_FLAG_PROFILE);
	case TRACE_REG_PERF_UNREGISTER:
		disable_trace_probe(tp, TP_FLAG_PROFILE);
		return 0;
	/* open/close/add/del need no per-probe work */
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
1067 1068 1069 1070 1071

/* kprobe pre_handler: fan out the hit to ftrace and/or perf. */
static __kprobes
int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);

	tp->nhit++;

	if (tp->flags & TP_FLAG_TRACE)
		kprobe_trace_func(tp, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tp->flags & TP_FLAG_PROFILE)
		kprobe_perf_func(tp, regs);
#endif
	return 0;	/* We don't tweek kernel, so just return 0 */
}

/* kretprobe handler: fan out the return hit to ftrace and/or perf. */
static __kprobes
int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);

	tp->nhit++;

	if (tp->flags & TP_FLAG_TRACE)
		kretprobe_trace_func(tp, ri, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tp->flags & TP_FLAG_PROFILE)
		kretprobe_perf_func(tp, ri, regs);
#endif
	return 0;	/* We don't tweek kernel, so just return 0 */
}
1099

1100 1101 1102 1103 1104 1105 1106 1107
/* Output callbacks for kretprobe-based events (installed in register_probe_event) */
static struct trace_event_functions kretprobe_funcs = {
	.trace		= print_kretprobe_event
};

/* Output callbacks for kprobe-based events (installed in register_probe_event) */
static struct trace_event_functions kprobe_funcs = {
	.trace		= print_kprobe_event
};

static int register_probe_event(struct trace_probe *tp)
{
	struct ftrace_event_call *call = &tp->call;
	int ret;

	/* Initialize ftrace_event_call */
1114
	INIT_LIST_HEAD(&call->class->fields);
1115
	if (trace_probe_is_return(tp)) {
1116
		call->event.funcs = &kretprobe_funcs;
1117
		call->class->define_fields = kretprobe_event_define_fields;
1118
	} else {
1119
		call->event.funcs = &kprobe_funcs;
1120
		call->class->define_fields = kprobe_event_define_fields;
1121
	}
1122 1123
	if (set_print_fmt(tp) < 0)
		return -ENOMEM;
1124 1125
	ret = register_ftrace_event(&call->event);
	if (!ret) {
1126
		kfree(call->print_fmt);
1127
		return -ENODEV;
1128
	}
1129
	call->flags = 0;
1130
	call->class->reg = kprobe_register;
1131 1132
	call->data = tp;
	ret = trace_add_event_call(call);
1133
	if (ret) {
1134
		pr_info("Failed to register kprobe event: %s\n", call->name);
1135
		kfree(call->print_fmt);
1136
		unregister_ftrace_event(&call->event);
1137
	}
1138 1139 1140 1141 1142
	return ret;
}

static void unregister_probe_event(struct trace_probe *tp)
{
1143
	/* tp->event is unregistered in trace_remove_event_call() */
1144
	trace_remove_event_call(&tp->call);
1145
	kfree(tp->call.print_fmt);
1146 1147
}

/* Make a debugfs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

1154 1155 1156
	if (register_module_notifier(&trace_probe_module_nb))
		return -EINVAL;

1157 1158 1159 1160 1161 1162 1163
	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
				    NULL, &kprobe_events_ops);

1164
	/* Event list interface */
1165 1166 1167
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_events' entry\n");
1168 1169 1170 1171 1172 1173 1174 1175

	/* Profile interface */
	entry = debugfs_create_file("kprobe_profile", 0444, d_tracer,
				    NULL, &kprobe_profile_ops);

	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_profile' entry\n");
1176 1177 1178 1179 1180 1181 1182
	return 0;
}
fs_initcall(init_kprobe_trace);


#ifdef CONFIG_FTRACE_STARTUP_TEST

/*
 * Self-test target: a trivially checkable function the startup test
 * attaches entry and return probes to.  The "__used" keeps gcc from
 * removing the function symbol from the kallsyms table.
 */
static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
					       int a4, int a5, int a6)
{
	return a1 + a2 + a3 + a4 + a5 + a6;
}

static __init int kprobe_trace_self_tests_init(void)
{
1195
	int ret, warn = 0;
1196
	int (*target)(int, int, int, int, int, int);
1197
	struct trace_probe *tp;
1198 1199 1200 1201 1202

	target = kprobe_trace_selftest_target;

	pr_info("Testing kprobe tracing: ");

1203 1204 1205
	ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target "
				  "$stack $stack0 +0($stack)",
				  create_trace_probe);
1206 1207 1208 1209 1210
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error on probing function entry.\n");
		warn++;
	} else {
		/* Enable trace point */
1211
		tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
1212 1213 1214 1215
		if (WARN_ON_ONCE(tp == NULL)) {
			pr_warning("error on getting new probe.\n");
			warn++;
		} else
1216
			enable_trace_probe(tp, TP_FLAG_TRACE);
1217
	}
1218

1219 1220
	ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
				  "$retval", create_trace_probe);
1221 1222 1223 1224 1225
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error on probing function return.\n");
		warn++;
	} else {
		/* Enable trace point */
1226
		tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
1227 1228 1229 1230
		if (WARN_ON_ONCE(tp == NULL)) {
			pr_warning("error on getting new probe.\n");
			warn++;
		} else
1231
			enable_trace_probe(tp, TP_FLAG_TRACE);
1232 1233 1234 1235
	}

	if (warn)
		goto end;
1236 1237 1238

	ret = target(1, 2, 3, 4, 5, 6);

1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253
	/* Disable trace points before removing it */
	tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tp == NULL)) {
		pr_warning("error on getting test probe.\n");
		warn++;
	} else
		disable_trace_probe(tp, TP_FLAG_TRACE);

	tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tp == NULL)) {
		pr_warning("error on getting 2nd test probe.\n");
		warn++;
	} else
		disable_trace_probe(tp, TP_FLAG_TRACE);

1254
	ret = traceprobe_command("-:testprobe", create_trace_probe);
1255 1256 1257 1258 1259
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error on deleting a probe.\n");
		warn++;
	}

1260
	ret = traceprobe_command("-:testprobe2", create_trace_probe);
1261 1262 1263 1264
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error on deleting a probe.\n");
		warn++;
	}
1265

1266
end:
1267
	release_all_trace_probes();
1268 1269 1270 1271
	if (warn)
		pr_cont("NG: Some tests are failed. Please check them.\n");
	else
		pr_cont("OK\n");
1272 1273 1274 1275 1276 1277
	return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif