trace_kprobe.c 31.3 KB
Newer Older
1
/*
2
 * Kprobes-based tracing events
3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/module.h>
#include <linux/uaccess.h>

23
#include "trace_probe.h"
24

25
#define KPROBE_EVENT_SYSTEM "kprobes"
26

27
/**
28
 * Kprobe event core functions
29 30 31 32
 */

/*
 * Per-event descriptor for a kprobe- or kretprobe-based trace event.
 * One instance is allocated per defined event and linked on probe_list.
 */
struct trace_probe {
	struct list_head	list;		/* entry in probe_list, under probe_lock */
	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
	unsigned long 		nhit;		/* number of times the probe fired */
	unsigned int		flags;	/* For TP_FLAG_* */
	const char		*symbol;	/* symbol name */
	struct ftrace_event_class	class;	/* ftrace event class (system name etc.) */
	struct ftrace_event_call	call;	/* ftrace event call registered for this probe */
	ssize_t			size;		/* trace entry size */
	unsigned int		nr_args;	/* number of entries in args[] */
	struct probe_arg	args[];		/* flexible array of fetch arguments */
};

44 45
/*
 * Allocation size for a trace_probe carrying @n fetch arguments
 * (accounts for the trailing flexible array member 'args').
 */
#define SIZEOF_TRACE_PROBE(n)			\
	(offsetof(struct trace_probe, args) +	\
	(sizeof(struct probe_arg) * (n)))
47

48

49
static __kprobes int trace_probe_is_return(struct trace_probe *tp)
50
{
51
	return tp->rp.handler != NULL;
52 53
}

54
static __kprobes const char *trace_probe_symbol(struct trace_probe *tp)
55 56 57 58
{
	return tp->symbol ? tp->symbol : "unknown";
}

59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91
/* Offset from the probed symbol (0 for address-based probes). */
static __kprobes unsigned long trace_probe_offset(struct trace_probe *tp)
{
	return tp->rp.kp.offset;
}

/* True if any consumer (ftrace or perf) currently has this probe enabled. */
static __kprobes bool trace_probe_is_enabled(struct trace_probe *tp)
{
	return !!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE));
}

/* True if the underlying k*probe has been registered with the kprobes core. */
static __kprobes bool trace_probe_is_registered(struct trace_probe *tp)
{
	return !!(tp->flags & TP_FLAG_REGISTERED);
}

/*
 * True if the probed code has disappeared (e.g. the target module was
 * unloaded) and the kprobe was marked gone by the kprobes core.
 */
static __kprobes bool trace_probe_has_gone(struct trace_probe *tp)
{
	return kprobe_gone(&tp->rp.kp);
}

/* True if the probe symbol is "<mod->name>:<symbol>", i.e. lives in @mod. */
static __kprobes bool trace_probe_within_module(struct trace_probe *tp,
						struct module *mod)
{
	const char *sym = trace_probe_symbol(tp);
	int prefix_len = strlen(mod->name);

	if (strncmp(mod->name, sym, prefix_len) != 0)
		return false;

	return sym[prefix_len] == ':';
}

/* True if the probe targets a module symbol ("MOD:SYM" notation). */
static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp)
{
	return !!strchr(trace_probe_symbol(tp), ':');
}

92 93 94 95 96 97
/* Event registration helpers - defined later in this file. */
static int register_probe_event(struct trace_probe *tp);
static void unregister_probe_event(struct trace_probe *tp);

/* probe_lock protects probe_list and each probe's registration state. */
static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);

/* Dispatchers that fan a probe hit out to the trace and/or perf handlers. */
static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
				struct pt_regs *regs);

102 103 104
/*
 * Allocate new trace_probe and initialize it (including kprobes).
 *
 * Either @symbol (+@offs) or @addr identifies the probe point; @symbol
 * takes precedence when both are given. Returns the new probe or an
 * ERR_PTR (-ENOMEM on allocation failure, -EINVAL on a bad event/group
 * name). On error all partially-allocated strings are freed.
 */
static struct trace_probe *alloc_trace_probe(const char *group,
					     const char *event,
					     void *addr,
					     const char *symbol,
					     unsigned long offs,
					     int nargs, bool is_return)
{
	struct trace_probe *tp;
	int ret = -ENOMEM;

	tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(ret);

	if (symbol) {
		/* Probe by symbol name: keep our own copy of the string. */
		tp->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tp->symbol)
			goto error;
		tp->rp.kp.symbol_name = tp->symbol;
		tp->rp.kp.offset = offs;
	} else
		tp->rp.kp.addr = addr;

	/* Installing a kretprobe handler is what makes this a return probe. */
	if (is_return)
		tp->rp.handler = kretprobe_dispatcher;
	else
		tp->rp.kp.pre_handler = kprobe_dispatcher;

	if (!event || !is_good_name(event)) {
		ret = -EINVAL;
		goto error;
	}

	tp->call.class = &tp->class;
	tp->call.name = kstrdup(event, GFP_KERNEL);
	if (!tp->call.name)
		goto error;

	if (!group || !is_good_name(group)) {
		ret = -EINVAL;
		goto error;
	}

	tp->class.system = kstrdup(group, GFP_KERNEL);
	if (!tp->class.system)
		goto error;

	INIT_LIST_HEAD(&tp->list);
	return tp;
error:
	/* kfree(NULL) is a no-op, so unset members are safe to free. */
	kfree(tp->call.name);
	kfree(tp->symbol);
	kfree(tp);
	return ERR_PTR(ret);
}

/* Free a trace_probe and everything it owns (args, names, the probe itself). */
static void free_trace_probe(struct trace_probe *tp)
{
	int i;

	for (i = 0; i < tp->nr_args; i++)
		traceprobe_free_probe_arg(&tp->args[i]);

	kfree(tp->call.class->system);
	kfree(tp->call.name);
	kfree(tp->symbol);
	kfree(tp);
}

174
/*
 * Look up a probe by event and group name, or NULL if not found.
 * Callers hold probe_lock while walking probe_list.
 */
static struct trace_probe *find_trace_probe(const char *event,
					    const char *group)
{
	struct trace_probe *tp;

	list_for_each_entry(tp, &probe_list, list)
		if (strcmp(tp->call.name, event) == 0 &&
		    strcmp(tp->call.class->system, group) == 0)
			return tp;
	return NULL;
}

186 187 188 189 190 191
/* Enable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */
static int enable_trace_probe(struct trace_probe *tp, int flag)
{
	int ret = 0;

	/* Record the consumer first so is_enabled() reflects it below. */
	tp->flags |= flag;

	/* Only arm a probe that is registered and whose target still exists. */
	if (trace_probe_is_enabled(tp) && trace_probe_is_registered(tp) &&
	    !trace_probe_has_gone(tp)) {
		if (trace_probe_is_return(tp))
			ret = enable_kretprobe(&tp->rp);
		else
			ret = enable_kprobe(&tp->rp.kp);
	}

	return ret;
}

/* Disable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */
static void disable_trace_probe(struct trace_probe *tp, int flag)
{
	tp->flags &= ~flag;
	/* Disarm only when the last consumer is gone. */
	if (!trace_probe_is_enabled(tp) && trace_probe_is_registered(tp)) {
		if (trace_probe_is_return(tp))
			disable_kretprobe(&tp->rp);
		else
			disable_kprobe(&tp->rp.kp);
	}
}

215 216
/* Internal register function - just handle k*probes and flags */
static int __register_trace_probe(struct trace_probe *tp)
217
{
218
	int i, ret;
219 220 221 222

	if (trace_probe_is_registered(tp))
		return -EINVAL;

223
	for (i = 0; i < tp->nr_args; i++)
224
		traceprobe_update_arg(&tp->args[i]);
225

226 227 228 229 230 231
	/* Set/clear disabled flag according to tp->flag */
	if (trace_probe_is_enabled(tp))
		tp->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
	else
		tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;

232
	if (trace_probe_is_return(tp))
233
		ret = register_kretprobe(&tp->rp);
234
	else
235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272
		ret = register_kprobe(&tp->rp.kp);

	if (ret == 0)
		tp->flags |= TP_FLAG_REGISTERED;
	else {
		pr_warning("Could not insert probe at %s+%lu: %d\n",
			   trace_probe_symbol(tp), trace_probe_offset(tp), ret);
		if (ret == -ENOENT && trace_probe_is_on_module(tp)) {
			pr_warning("This probe might be able to register after"
				   "target module is loaded. Continue.\n");
			ret = 0;
		} else if (ret == -EILSEQ) {
			pr_warning("Probing address(0x%p) is not an "
				   "instruction boundary.\n",
				   tp->rp.kp.addr);
			ret = -EINVAL;
		}
	}

	return ret;
}

/* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_probe(struct trace_probe *tp)
{
	if (trace_probe_is_registered(tp)) {
		if (trace_probe_is_return(tp))
			unregister_kretprobe(&tp->rp);
		else
			unregister_kprobe(&tp->rp.kp);
		tp->flags &= ~TP_FLAG_REGISTERED;
		/* Cleanup kprobe for reuse */
		if (tp->rp.kp.symbol_name)
			tp->rp.kp.addr = NULL;
	}
}

/* Unregister a trace_probe and probe_event: call with locking probe_lock */
static int unregister_trace_probe(struct trace_probe *tp)
{
	/* Enabled event can not be unregistered */
	if (trace_probe_is_enabled(tp))
		return -EBUSY;

	__unregister_trace_probe(tp);
	list_del(&tp->list);
	unregister_probe_event(tp);

	return 0;
}

/*
 * Register a trace_probe and probe_event.
 *
 * An existing probe with the same group/event name is replaced (unless it
 * is enabled, in which case -EBUSY propagates). On k*probe registration
 * failure the just-created probe event is rolled back.
 */
static int register_trace_probe(struct trace_probe *tp)
{
	struct trace_probe *old_tp;
	int ret;

	mutex_lock(&probe_lock);

	/* Delete old (same name) event if exist */
	old_tp = find_trace_probe(tp->call.name, tp->call.class->system);
	if (old_tp) {
		ret = unregister_trace_probe(old_tp);
		if (ret < 0)
			goto end;
		free_trace_probe(old_tp);
	}

	/* Register new event */
	ret = register_probe_event(tp);
	if (ret) {
		pr_warning("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	/* Register k*probe */
	ret = __register_trace_probe(tp);
	if (ret < 0)
		unregister_probe_event(tp);
	else
		list_add_tail(&tp->list, &probe_list);

end:
	mutex_unlock(&probe_lock);
	return ret;
}

322 323 324 325 326 327 328 329 330 331 332 333 334 335 336
/*
 * Module notifier call back, checking event on the module.
 *
 * When a module comes up, re-register any probe whose symbol lives in
 * that module so it attaches to the freshly loaded code.
 */
static int trace_probe_module_callback(struct notifier_block *nb,
				       unsigned long val, void *data)
{
	struct module *mod = data;
	struct trace_probe *tp;
	int ret;

	if (val != MODULE_STATE_COMING)
		return NOTIFY_DONE;

	/* Update probes on coming module */
	mutex_lock(&probe_lock);
	list_for_each_entry(tp, &probe_list, list) {
		if (trace_probe_within_module(tp, mod)) {
			/* Don't need to check busy - this should have gone. */
			__unregister_trace_probe(tp);
			ret = __register_trace_probe(tp);
			if (ret)
				/*
				 * BUGFIX: the concatenated literals were
				 * missing a space ("on""%s"), printing
				 * "probe foo onbar: -2".
				 */
				pr_warning("Failed to re-register probe %s on "
					   "%s: %d\n",
					   tp->call.name, mod->name, ret);
		}
	}
	mutex_unlock(&probe_lock);

	return NOTIFY_DONE;
}

/* Module state notifier; registered in init_kprobe_trace(). */
static struct notifier_block trace_probe_module_nb = {
	.notifier_call = trace_probe_module_callback,
	.priority = 1	/* Invoked after kprobe module callback */
};

356 357 358 359
/*
 * Parse one kprobe_events command line (already split into argv words)
 * and create, or with '-' delete, the corresponding probe event.
 */
static int create_trace_probe(int argc, char **argv)
{
	/*
	 * Argument syntax:
	 *  - Add kprobe: p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe: r[:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
	 * Fetch args:
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth of stack (N:0-)
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG	: fetch register REG
	 * Dereferencing memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 * Type of args:
	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
	 */
	struct trace_probe *tp;
	int i, ret = 0;
	bool is_return = false, is_delete = false;
	char *symbol = NULL, *event = NULL, *group = NULL;
	char *arg;
	unsigned long offset = 0;
	void *addr = NULL;
	char buf[MAX_EVENT_NAME_LEN];

	/* argc must be >= 1 */
	if (argv[0][0] == 'p')
		is_return = false;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] == '-')
		is_delete = true;
	else {
		pr_info("Probe definition must be started with 'p', 'r' or"
			" '-'.\n");
		return -EINVAL;
	}

	/* Optional ":GRP/EVENT" or ":EVENT" suffix on the command letter. */
	if (argv[0][1] == ':') {
		event = &argv[0][2];
		if (strchr(event, '/')) {
			group = event;
			event = strchr(group, '/') + 1;
			event[-1] = '\0';	/* split group from event in place */
			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = KPROBE_EVENT_SYSTEM;

	if (is_delete) {
		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&probe_lock);
		tp = find_trace_probe(event, group);
		if (!tp) {
			mutex_unlock(&probe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_probe(tp);
		if (ret == 0)
			free_trace_probe(tp);
		mutex_unlock(&probe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	if (isdigit(argv[1][0])) {
		if (is_return) {
			pr_info("Return probe point must be a symbol.\n");
			return -EINVAL;
		}
		/* an address specified */
		ret = strict_strtoul(&argv[1][0], 0, (unsigned long *)&addr);
		if (ret) {
			pr_info("Failed to parse address.\n");
			return ret;
		}
	} else {
		/* a symbol specified */
		symbol = argv[1];
		/* TODO: support .init module functions */
		ret = traceprobe_split_symbol_offset(symbol, &offset);
		if (ret) {
			pr_info("Failed to parse symbol.\n");
			return ret;
		}
		if (offset && is_return) {
			pr_info("Return probe must be used without offset.\n");
			return -EINVAL;
		}
	}
	/* Remaining argv entries are the fetch arguments. */
	argc -= 2; argv += 2;

	/* setup a probe */
	if (!event) {
		/* Make a new event name */
		if (symbol)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
				 is_return ? 'r' : 'p', symbol, offset);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
				 is_return ? 'r' : 'p', addr);
		event = buf;
	}
	tp = alloc_trace_probe(group, event, addr, symbol, offset, argc,
			       is_return);
	if (IS_ERR(tp)) {
		pr_info("Failed to allocate trace_probe.(%d)\n",
			(int)PTR_ERR(tp));
		return PTR_ERR(tp);
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		/* Increment count for freeing args in error case */
		tp->nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			tp->args[i].name = kstrdup(buf, GFP_KERNEL);
		}

		if (!tp->args[i].name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(tp->args[i].name)) {
			pr_info("Invalid argument[%d] name: %s\n",
				i, tp->args[i].name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(tp->args[i].name,
							tp->args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tp->size, &tp->args[i],
						is_return, true);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_probe(tp);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_probe(tp);
	return ret;
}

545
/*
 * Remove every probe event (used when kprobe_events is opened with
 * O_TRUNC). Fails with -EBUSY if any probe is still enabled.
 */
static int release_all_trace_probes(void)
{
	struct trace_probe *tp;
	int ret = 0;

	mutex_lock(&probe_lock);
	/* Ensure no probe is in use. */
	list_for_each_entry(tp, &probe_list, list)
		if (trace_probe_is_enabled(tp)) {
			ret = -EBUSY;
			goto end;
		}
	/* TODO: Use batch unregistration */
	while (!list_empty(&probe_list)) {
		tp = list_entry(probe_list.next, struct trace_probe, list);
		unregister_trace_probe(tp);
		free_trace_probe(tp);
	}

end:
	mutex_unlock(&probe_lock);

	return ret;
}

/* Probes listing interfaces */
/* seq_file start: take probe_lock for the whole iteration (see _stop). */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&probe_lock);
	return seq_list_start(&probe_list, *pos);
}

/* seq_file next: advance to the following probe on probe_list. */
static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &probe_list, pos);
}

/* seq_file stop: release the lock taken in probes_seq_start(). */
static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&probe_lock);
}

/*
 * Print one probe definition in the same syntax accepted by
 * create_trace_probe(): "p|r:GRP/EVENT SYM[+offs]|ADDR [NAME=FETCHARG...]".
 */
static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_probe *tp = v;
	int i;

	seq_printf(m, "%c", trace_probe_is_return(tp) ? 'r' : 'p');
	seq_printf(m, ":%s/%s", tp->call.class->system, tp->call.name);

	if (!tp->symbol)
		seq_printf(m, " 0x%p", tp->rp.kp.addr);
	else if (tp->rp.kp.offset)
		seq_printf(m, " %s+%u", trace_probe_symbol(tp),
			   tp->rp.kp.offset);
	else
		seq_printf(m, " %s", trace_probe_symbol(tp));

	for (i = 0; i < tp->nr_args; i++)
		seq_printf(m, " %s=%s", tp->args[i].name, tp->args[i].comm);
	seq_printf(m, "\n");

	return 0;
}

/* seq_file operations for the kprobe_events listing. */
static const struct seq_operations probes_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_seq_show
};

/*
 * Open handler for kprobe_events. Opening for write with O_TRUNC
 * clears all existing probes first (fails if any probe is enabled).
 */
static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = release_all_trace_probes();
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

/* Write handler: feed each written line to create_trace_probe(). */
static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return traceprobe_probes_write(file, buffer, count, ppos,
			create_trace_probe);
}

/* File operations for the debugfs kprobe_events control file. */
static const struct file_operations kprobe_events_ops = {
	.owner          = THIS_MODULE,
	.open           = probes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
	.write		= probes_write,
};

646 647 648 649 650 651
/* Probes profiling interfaces */
/* Show per-probe hit and miss counts for the kprobe_profile file. */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_probe *tp = v;

	seq_printf(m, "  %-44s %15lu %15lu\n", tp->call.name, tp->nhit,
		   tp->rp.kp.nmissed);

	return 0;
}

/* seq_file operations for kprobe_profile (reuses the listing iterators). */
static const struct seq_operations profile_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_profile_seq_show
};

/* Open handler for the kprobe_profile statistics file. */
static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

/* File operations for the read-only debugfs kprobe_profile file. */
static const struct file_operations kprobe_profile_ops = {
	.owner          = THIS_MODULE,
	.open           = profile_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724
/* Sum up total data length for dynamic arraies (strings) */
static __kprobes int __get_data_size(struct trace_probe *tp,
				     struct pt_regs *regs)
{
	int i, ret = 0;
	u32 len;

	/* Only args with a fetch_size function have dynamic-length data. */
	for (i = 0; i < tp->nr_args; i++)
		if (unlikely(tp->args[i].fetch_size.fn)) {
			call_fetch(&tp->args[i].fetch_size, regs, &len);
			ret += len;
		}

	return ret;
}

/* Store the value of each argument */
static __kprobes void store_trace_args(int ent_size, struct trace_probe *tp,
				       struct pt_regs *regs,
				       u8 *data, int maxlen)
{
	int i;
	u32 end = tp->size;	/* dynamic data starts after the fixed args */
	u32 *dl;	/* Data (relative) location */

	for (i = 0; i < tp->nr_args; i++) {
		if (unlikely(tp->args[i].fetch_size.fn)) {
			/*
			 * First, we set the relative location and
			 * maximum data length to *dl
			 */
			dl = (u32 *)(data + tp->args[i].offset);
			*dl = make_data_rloc(maxlen, end - tp->args[i].offset);
			/* Then try to fetch string or dynamic array data */
			call_fetch(&tp->args[i].fetch, regs, dl);
			/* Reduce maximum length */
			end += get_rloc_len(*dl);
			maxlen -= get_rloc_len(*dl);
			/* Trick here, convert data_rloc to data_loc */
			*dl = convert_rloc_to_loc(*dl,
				 ent_size + tp->args[i].offset);
		} else
			/* Just fetching data normally */
			call_fetch(&tp->args[i].fetch, regs,
				   data + tp->args[i].offset);
	}
}

725
/* Kprobe handler */
/*
 * Record a kprobe hit into the ftrace ring buffer: reserve an event
 * sized for the fixed args plus any dynamic (string) data, fill it,
 * and commit unless the event filter discards it.
 */
static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
	struct kprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, dsize, pc;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	tp->nhit++;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(tp, regs);
	size = sizeof(*entry) + tp->size + dsize;

	event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
						  size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->ip = (unsigned long)kp->addr;
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_nowake_buffer_unlock_commit_regs(buffer, event,
						       irq_flags, pc, regs);
}

/* Kretprobe handler */
/*
 * Record a function-return hit: like kprobe_trace_func() but stores
 * both the probed function address and the return address.
 */
static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
					  struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
	struct kretprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, pc, dsize;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(tp, regs);
	size = sizeof(*entry) + tp->size + dsize;

	event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
						  size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_nowake_buffer_unlock_commit_regs(buffer, event,
						       irq_flags, pc, regs);
}

/* Event entry printers */
/*
 * Format a kprobe event as "EVENT: (ip) arg1=... arg2=...".
 * Returns TRACE_TYPE_PARTIAL_LINE if the seq buffer fills up.
 */
enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct kprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	/* Per-argument printers; raw data follows the fixed-size header. */
	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

/*
 * Format a kretprobe event as "EVENT: (ret_ip <- func) arg1=...".
 * Returns TRACE_TYPE_PARTIAL_LINE if the seq buffer fills up.
 */
enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags,
		      struct trace_event *event)
{
	struct kretprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kretprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, " <- "))
		goto partial;

	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	/* Per-argument printers; raw data follows the fixed-size header. */
	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}


/* Define the ftrace event fields (ip + one field per fetch arg) for kprobes. */
static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kprobe_trace_entry_head field;
	struct trace_probe *tp = (struct trace_probe *)event_call->data;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tp->nr_args; i++) {
		ret = trace_define_field(event_call, tp->args[i].type->fmttype,
					 tp->args[i].name,
					 sizeof(field) + tp->args[i].offset,
					 tp->args[i].type->size,
					 tp->args[i].type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

/* Define the ftrace event fields (func, ret_ip + fetch args) for kretprobes. */
static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kretprobe_trace_entry_head field;
	struct trace_probe *tp = (struct trace_probe *)event_call->data;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tp->nr_args; i++) {
		ret = trace_define_field(event_call, tp->args[i].type->fmttype,
					 tp->args[i].name,
					 sizeof(field) + tp->args[i].offset,
					 tp->args[i].type->size,
					 tp->args[i].type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

914 915 916 917 918 919 920
/*
 * Build the event's print_fmt string into @buf (at most @len bytes).
 * Called with len=0 first to size the buffer; returns the needed length.
 */
static int __set_print_fmt(struct trace_probe *tp, char *buf, int len)
{
	int i;
	int pos = 0;

	const char *fmt, *arg;

	if (!trace_probe_is_return(tp)) {
		fmt = "(%lx)";
		arg = "REC->" FIELD_STRING_IP;
	} else {
		fmt = "(%lx <- %lx)";
		arg = "REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP;
	}

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt);

	for (i = 0; i < tp->nr_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%s",
				tp->args[i].name, tp->args[i].type->fmt);
	}

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg);

	for (i = 0; i < tp->nr_args; i++) {
		/* Dynamic string args are referenced via __get_str(). */
		if (strcmp(tp->args[i].type->name, "string") == 0)
			pos += snprintf(buf + pos, LEN_OR_ZERO,
					", __get_str(%s)",
					tp->args[i].name);
		else
			pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s",
					tp->args[i].name);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}

/*
 * Allocate and install tp->call.print_fmt using the two-pass
 * (size, then fill) protocol of __set_print_fmt().
 */
static int set_print_fmt(struct trace_probe *tp)
{
	int len;
	char *print_fmt;

	/* First: called with 0 length to calculate the needed length */
	len = __set_print_fmt(tp, NULL, 0);
	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_print_fmt(tp, print_fmt, len + 1);
	tp->call.print_fmt = print_fmt;

	return 0;
}

975
#ifdef CONFIG_PERF_EVENTS
976 977

/* Kprobe profile handler */
/*
 * Record a kprobe hit into the perf trace buffer (u32-aligned-to-u64
 * sizing per the perf buffer protocol) and submit it to this CPU's
 * perf event list.
 */
static __kprobes void kprobe_perf_func(struct kprobe *kp,
					 struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
	struct ftrace_event_call *call = &tp->call;
	struct kprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	dsize = __get_data_size(tp, regs);
	__size = sizeof(*entry) + tp->size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		     "profile buffer not large enough"))
		return;

	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
	if (!entry)
		return;

	entry->ip = (unsigned long)kp->addr;
	/* Zero the dynamic area so alignment padding never leaks data. */
	memset(&entry[1], 0, dsize);
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

	head = this_cpu_ptr(call->perf_events);
	perf_trace_buf_submit(entry, size, rctx,
					entry->ip, 1, regs, head, NULL);
}

/* Kretprobe profile handler */
/*
 * Record a function-return hit into the perf trace buffer; stores both
 * the probed function address and the return address.
 */
static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
					    struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
	struct ftrace_event_call *call = &tp->call;
	struct kretprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	dsize = __get_data_size(tp, regs);
	__size = sizeof(*entry) + tp->size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		     "profile buffer not large enough"))
		return;

	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
	if (!entry)
		return;

	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

	head = this_cpu_ptr(call->perf_events);
	perf_trace_buf_submit(entry, size, rctx,
					entry->ret_ip, 1, regs, head, NULL);
}
1040
#endif	/* CONFIG_PERF_EVENTS */
1041

1042
/*
 * ftrace event class reg() callback: enable/disable this probe for the
 * ftrace or perf consumer depending on @type.
 */
static __kprobes
int kprobe_register(struct ftrace_event_call *event,
		    enum trace_reg type, void *data)
{
	struct trace_probe *tp = (struct trace_probe *)event->data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return enable_trace_probe(tp, TP_FLAG_TRACE);
	case TRACE_REG_UNREGISTER:
		disable_trace_probe(tp, TP_FLAG_TRACE);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return enable_trace_probe(tp, TP_FLAG_PROFILE);
	case TRACE_REG_PERF_UNREGISTER:
		disable_trace_probe(tp, TP_FLAG_PROFILE);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		/* Nothing to do for per-event perf open/close/add/del. */
		return 0;
#endif
	}
	return 0;
}
1070 1071 1072 1073 1074

/* Pre-handler installed on every kprobe: fan out to active consumers. */
static __kprobes
int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);

	if (tp->flags & TP_FLAG_TRACE)
		kprobe_trace_func(kp, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tp->flags & TP_FLAG_PROFILE)
		kprobe_perf_func(kp, regs);
#endif
	return 0;	/* We don't tweek kernel, so just return 0 */
}

/* Return handler installed on every kretprobe: fan out to active consumers. */
static __kprobes
int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);

	if (tp->flags & TP_FLAG_TRACE)
		kretprobe_trace_func(ri, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tp->flags & TP_FLAG_PROFILE)
		kretprobe_perf_func(ri, regs);
#endif
	return 0;	/* We don't tweek kernel, so just return 0 */
}
/* Output callbacks for a return-probe event (used by register_probe_event) */
static struct trace_event_functions kretprobe_funcs = {
	.trace		= print_kretprobe_event
};

/* Output callbacks for an entry-probe event */
static struct trace_event_functions kprobe_funcs = {
	.trace		= print_kprobe_event
};
static int register_probe_event(struct trace_probe *tp)
{
	struct ftrace_event_call *call = &tp->call;
	int ret;

	/* Initialize ftrace_event_call */
1113
	INIT_LIST_HEAD(&call->class->fields);
1114
	if (trace_probe_is_return(tp)) {
1115
		call->event.funcs = &kretprobe_funcs;
1116
		call->class->define_fields = kretprobe_event_define_fields;
1117
	} else {
1118
		call->event.funcs = &kprobe_funcs;
1119
		call->class->define_fields = kprobe_event_define_fields;
1120
	}
1121 1122
	if (set_print_fmt(tp) < 0)
		return -ENOMEM;
1123 1124
	ret = register_ftrace_event(&call->event);
	if (!ret) {
1125
		kfree(call->print_fmt);
1126
		return -ENODEV;
1127
	}
1128
	call->flags = 0;
1129
	call->class->reg = kprobe_register;
1130 1131
	call->data = tp;
	ret = trace_add_event_call(call);
1132
	if (ret) {
1133
		pr_info("Failed to register kprobe event: %s\n", call->name);
1134
		kfree(call->print_fmt);
1135
		unregister_ftrace_event(&call->event);
1136
	}
1137 1138 1139 1140 1141
	return ret;
}

static void unregister_probe_event(struct trace_probe *tp)
{
1142
	/* tp->event is unregistered in trace_remove_event_call() */
1143
	trace_remove_event_call(&tp->call);
1144
	kfree(tp->call.print_fmt);
1145 1146
}

/* Make a debugfs interface for controlling probe points */
1148 1149 1150 1151 1152
static __init int init_kprobe_trace(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

1153 1154 1155
	if (register_module_notifier(&trace_probe_module_nb))
		return -EINVAL;

1156 1157 1158 1159 1160 1161 1162
	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
				    NULL, &kprobe_events_ops);

1163
	/* Event list interface */
1164 1165 1166
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_events' entry\n");
1167 1168 1169 1170 1171 1172 1173 1174

	/* Profile interface */
	entry = debugfs_create_file("kprobe_profile", 0444, d_tracer,
				    NULL, &kprobe_profile_ops);

	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_profile' entry\n");
1175 1176 1177 1178 1179 1180 1181
	return 0;
}
fs_initcall(init_kprobe_trace);


#ifdef CONFIG_FTRACE_STARTUP_TEST

/*
 * Self-test target: simply sums its six arguments. The "__used" keeps
 * gcc from removing the function symbol from the kallsyms table, so the
 * selftest can place probes on it by name.
 */
static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
					       int a4, int a5, int a6)
{
	int sum = a1 + a2 + a3 + a4 + a5 + a6;

	return sum;
}

static __init int kprobe_trace_self_tests_init(void)
{
1194
	int ret, warn = 0;
1195
	int (*target)(int, int, int, int, int, int);
1196
	struct trace_probe *tp;
1197 1198 1199 1200 1201

	target = kprobe_trace_selftest_target;

	pr_info("Testing kprobe tracing: ");

1202 1203 1204
	ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target "
				  "$stack $stack0 +0($stack)",
				  create_trace_probe);
1205 1206 1207 1208 1209
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error on probing function entry.\n");
		warn++;
	} else {
		/* Enable trace point */
1210
		tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
1211 1212 1213 1214
		if (WARN_ON_ONCE(tp == NULL)) {
			pr_warning("error on getting new probe.\n");
			warn++;
		} else
1215
			enable_trace_probe(tp, TP_FLAG_TRACE);
1216
	}
1217

1218 1219
	ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
				  "$retval", create_trace_probe);
1220 1221 1222 1223 1224
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error on probing function return.\n");
		warn++;
	} else {
		/* Enable trace point */
1225
		tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
1226 1227 1228 1229
		if (WARN_ON_ONCE(tp == NULL)) {
			pr_warning("error on getting new probe.\n");
			warn++;
		} else
1230
			enable_trace_probe(tp, TP_FLAG_TRACE);
1231 1232 1233 1234
	}

	if (warn)
		goto end;
1235 1236 1237

	ret = target(1, 2, 3, 4, 5, 6);

1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252
	/* Disable trace points before removing it */
	tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tp == NULL)) {
		pr_warning("error on getting test probe.\n");
		warn++;
	} else
		disable_trace_probe(tp, TP_FLAG_TRACE);

	tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tp == NULL)) {
		pr_warning("error on getting 2nd test probe.\n");
		warn++;
	} else
		disable_trace_probe(tp, TP_FLAG_TRACE);

1253
	ret = traceprobe_command("-:testprobe", create_trace_probe);
1254 1255 1256 1257 1258
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error on deleting a probe.\n");
		warn++;
	}

1259
	ret = traceprobe_command("-:testprobe2", create_trace_probe);
1260 1261 1262 1263
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error on deleting a probe.\n");
		warn++;
	}
1264

1265
end:
1266
	release_all_trace_probes();
1267 1268 1269 1270
	if (warn)
		pr_cont("NG: Some tests are failed. Please check them.\n");
	else
		pr_cont("OK\n");
1271 1272 1273 1274 1275 1276
	return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif