/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/delay.h>

#include "trace_output.h"

#define TRACE_SYSTEM "TRACE_SYSTEM"

static DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);

int trace_define_field(struct ftrace_event_call *call, char *type,
		       char *name, int offset, int size)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	field->offset = offset;
	field->size = size;
	list_add(&field->link, &call->fields);

	return 0;

err:
	if (field) {
		kfree(field->name);
		kfree(field->type);
	}
	kfree(field);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(trace_define_field);
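
/*
 * Illustrative sketch (not part of this file): an event's define_fields
 * callback would typically register each field of its entry structure
 * with trace_define_field(), e.g.:
 *
 *	ret = trace_define_field(call, "unsigned long", "ip",
 *				 offsetof(struct ftrace_entry, ip),
 *				 sizeof(unsigned long));
 */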

static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	list_for_each_entry(call, &ftrace_events, list) {

		if (call->enabled) {
			call->enabled = 0;
			call->unregfunc();
		}
	}
}

static void ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{

	switch (enable) {
	case 0:
		if (call->enabled) {
			call->enabled = 0;
			call->unregfunc();
		}
		break;
	case 1:
		if (!call->enabled) {
			call->enabled = 1;
			call->regfunc();
		}
		break;
	}
}

static int ftrace_set_clr_event(char *buf, int set)
{
	struct ftrace_event_call *call;
	char *event = NULL, *sub = NULL, *match;
	int ret = -EINVAL;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->regfunc)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->system) != 0)
			continue;

		if (sub && strcmp(sub, call->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}
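
/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug):
 * under the matching rules above, each of these enables every event
 * in the "sched" subsystem:
 *
 *	echo 'sched:*' > /sys/kernel/debug/tracing/set_event
 *	echo 'sched:'  > /sys/kernel/debug/tracing/set_event
 *	echo 'sched'   > /sys/kernel/debug/tracing/set_event
 *
 * A leading '!' requests disabling instead; it is stripped by the
 * caller, ftrace_event_write(), which then passes set == 0 here.
 */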

/* 127 characters plus a terminating NUL should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	size_t read = 0;
	int i, set = 1;
	ssize_t ret;
	char *buf;
	char ch;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = get_user(ch, ubuf++);
	if (ret)
		return ret;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			return ret;
		read++;
		cnt--;
	}

	/* Only white space found? */
	if (isspace(ch)) {
		file->f_pos += read;
		ret = read;
		return ret;
	}

	buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (cnt > EVENT_BUF_SIZE)
		cnt = EVENT_BUF_SIZE;

	i = 0;
	while (cnt && !isspace(ch)) {
		if (!i && ch == '!')
			set = 0;
		else
			buf[i++] = ch;

		ret = get_user(ch, ubuf++);
		if (ret)
			goto out_free;
		read++;
		cnt--;
	}
	buf[i] = 0;

	file->f_pos += read;

	ret = ftrace_set_clr_event(buf, set);
	if (ret)
		goto out_free;

	ret = read;

 out_free:
	kfree(buf);

	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	(*pos)++;

	for (;;) {
		if (list == &ftrace_events)
			return NULL;

		call = list_entry(list, struct ftrace_event_call, list);

		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->regfunc)
			break;

		list = list->next;
	}

	m->private = list->next;

	return call;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	return t_next(m, NULL, pos);
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	(*pos)++;

 retry:
	if (list == &ftrace_events)
		return NULL;

	call = list_entry(list, struct ftrace_event_call, list);

	if (!call->enabled) {
		list = list->next;
		goto retry;
	}

	m->private = list->next;

	return call;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	return s_next(m, NULL, pos);
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}
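
/*
 * Sample "available_events" output as rendered by t_show() above (event
 * names are examples): one event per line, prefixed by its subsystem
 * unless it belongs to TRACE_SYSTEM:
 *
 *	sched:sched_switch
 *	irq:irq_handler_entry
 */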

static void t_stop(struct seq_file *m, void *p)
{
}

static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	int ret;
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	ret = seq_open(file, seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = ftrace_events.next;
	}
	return ret;
}
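
/*
 * Note: opening set_event for writing without O_APPEND (a plain
 * "echo ... > set_event") first disables every enabled event, mimicking
 * truncate-on-open semantics; "echo ... >> set_event" adds to the
 * current set instead.
 */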

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->enabled)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return cnt;
}
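
/*
 * Usage sketch for the per-event "enable" file (path assumes the
 * standard debugfs mount point; event name is an example):
 *
 *	echo 1 > /sys/kernel/debug/tracing/events/sched/sched_switch/enable
 *	cat /sys/kernel/debug/tracing/events/sched/sched_switch/enable
 */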

extern char *__bad_type_size(void);

#undef FIELD
#define FIELD(type, name)						\
	sizeof(type) != sizeof(field.name) ? __bad_type_size() :	\
	#type, "common_" #name, offsetof(typeof(field), name),		\
		sizeof(field.name)
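
/*
 * The size comparison above folds to a compile-time constant: when the
 * declared type matches the field, the __bad_type_size() arm is dead
 * code and the intentionally undefined function is never referenced.
 * A mismatch leaves the reference in place and fails at link time,
 * turning a wrong FIELD() annotation into a build error.
 */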

static int trace_write_header(struct trace_seq *s)
{
	struct trace_entry field;

	/* struct trace_entry */
	return trace_seq_printf(s,
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\n",
				FIELD(unsigned short, type),
				FIELD(unsigned char, flags),
				FIELD(unsigned char, preempt_count),
				FIELD(int, pid),
				FIELD(int, tgid));
}
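
/*
 * Rendered, the common header above looks roughly like this (offsets
 * and sizes vary with architecture and config):
 *
 *	field:unsigned short type;	offset:0;	size:2;
 *	field:unsigned char flags;	offset:2;	size:1;
 *	field:unsigned char preempt_count;	offset:3;	size:1;
 *	field:int pid;	offset:4;	size:4;
 *	field:int tgid;	offset:8;	size:4;
 */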

static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	char *buf;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	/* If any of the first writes fail, so will the show_format. */

	trace_seq_printf(s, "name: %s\n", call->name);
	trace_seq_printf(s, "ID: %d\n", call->id);
	trace_seq_printf(s, "format:\n");
	trace_write_header(s);

	r = call->show_format(s);
	if (!r) {
		/*
		 * ug!  The format output is bigger than a PAGE!!
		 */
		buf = "FORMAT TOO BIG\n";
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					      buf, strlen(buf));
		goto out;
	}

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
 out:
	kfree(s);
	return r;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->id);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	filter_print_preds(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64], *pbuf = buf;
	struct filter_pred *pred;
	int err;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;
	buf[cnt] = '\0';

	pred = kzalloc(sizeof(*pred), GFP_KERNEL);
	if (!pred)
		return -ENOMEM;

	err = filter_parse(&pbuf, pred);
	if (err < 0) {
		filter_free_pred(pred);
		return err;
	}

	if (pred->clear) {
		filter_disable_preds(call);
		filter_free_pred(pred);
		return cnt;
	}

	err = filter_add_pred(call, pred);
	if (err < 0) {
		filter_free_pred(pred);
		return err;
	}

	filter_free_pred(pred);

	*ppos += cnt;

	return cnt;
}
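
/*
 * Filter usage sketch (schematic; field and value are placeholders for
 * whatever the event registered via define_fields): each write adds one
 * predicate, and a write of '0' clears them all:
 *
 *	echo '<field> == <value>' > events/<system>/<event>/filter
 *	echo 0 > events/<system>/<event>/filter
 */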

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	filter_print_subsystem_preds(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char buf[64], *pbuf = buf;
	struct filter_pred *pred;
	int err;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;
	buf[cnt] = '\0';

	pred = kzalloc(sizeof(*pred), GFP_KERNEL);
	if (!pred)
		return -ENOMEM;

	err = filter_parse(&pbuf, pred);
	if (err < 0) {
		filter_free_pred(pred);
		return err;
	}

	if (pred->clear) {
		filter_free_subsystem_preds(system);
		filter_free_pred(pred);
		return cnt;
	}

	err = filter_add_subsystem_pred(system, pred);
	if (err < 0) {
		filter_free_pred(pred);
		return err;
	}

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = tracing_open_generic,
	.read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
};

static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}

static LIST_HEAD(event_subsystems);

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			return system->entry;
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = NULL;

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);

	return system->entry;
}

static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct dentry *entry;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->system, d_events);

	if (call->raw_init) {
		ret = call->raw_init();
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
	}

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->regfunc)
		entry = trace_create_file("enable", 0644, call->dir, call,
					  enable);

	if (call->id)
		entry = trace_create_file("id", 0444, call->dir, call,
					  id);

	if (call->define_fields) {
		ret = call->define_fields();
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
		entry = trace_create_file("filter", 0644, call->dir, call,
					  filter);
	}

	/* A trace may not want to export its format */
	if (!call->show_format)
		return 0;

	entry = trace_create_file("format", 0444, call->dir, call,
				  format);

	return 0;
}
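
/*
 * The resulting debugfs layout for one event, sketched:
 *
 *	tracing/events/<system>/<event>/enable	(if it has a regfunc)
 *	tracing/events/<system>/<event>/id	(if it has an id)
 *	tracing/events/<system>/<event>/filter	(if it defines fields)
 *	tracing/events/<system>/<event>/format	(if it exports a format)
 */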

#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)
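
/*
 * Built-in events are laid out contiguously in a dedicated linker
 * section bounded by __start_ftrace_events/__stop_ftrace_events
 * (declared near event_trace_init() below), so this pointer walk
 * visits each of them; modules supply an equivalent range via
 * mod->trace_events.
 */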

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}

static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call, *start, *end;
	struct dentry *d_events;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	d_events = event_trace_events_dir();
	if (!d_events)
		return;

	for_each_event(call, start, end) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;

		/*
		 * This module has events, create file ops for this module
		 * if not already done.
		 */
		if (!file_ops) {
			file_ops = trace_create_file_ops(mod);
			if (!file_ops)
				return;
		}
		call->mod = mod;
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events,
				 &file_ops->id, &file_ops->enable,
				 &file_ops->filter, &file_ops->format);
	}
}

static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;

	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			if (call->enabled) {
				call->enabled = 0;
				call->unregfunc();
			}
			if (call->event)
				unregister_ftrace_event(call->event);
			debugfs_remove_recursive(call->dir);
			list_del(&call->list);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};

extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

static __init int event_trace_init(void)
{
	struct ftrace_event_call *call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events, &ftrace_event_id_fops,
				 &ftrace_enable_fops, &ftrace_event_filter_fops,
				 &ftrace_event_format_fops);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);

#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	char *sysname;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a regfunc */
		if (!call->regfunc)
			continue;

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->enabled) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		call->enabled = 1;
		call->regfunc();

		event_test_stuff();

		call->unregfunc();
		call->enabled = 0;

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		/* ftrace_set_clr_event can modify the name passed in. */
		sysname = kstrdup(system->name, GFP_KERNEL);
		if (WARN_ON(!sysname)) {
			pr_warning("Can't allocate memory, giving up!\n");
			return;
		}
		ret = ftrace_set_clr_event(sysname, 1);
		kfree(sysname);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		sysname = kstrdup(system->name, GFP_KERNEL);
		if (WARN_ON(!sysname)) {
			pr_warning("Can't allocate memory, giving up!\n");
			return;
		}
		ret = ftrace_set_clr_event(sysname, 0);
		kfree(sysname);

		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	sysname = kmalloc(4, GFP_KERNEL);
	if (WARN_ON(!sysname)) {
		pr_warning("Can't allocate memory, giving up!\n");
		return;
	}
	memcpy(sysname, "*:*", 4);
	ret = ftrace_set_clr_event(sysname, 1);
	if (WARN_ON_ONCE(ret)) {
		kfree(sysname);
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* reset sysname */
	memcpy(sysname, "*:*", 4);
	ret = ftrace_set_clr_event(sysname, 0);
	kfree(sysname);

	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry	= ring_buffer_event_data(event);
	entry->ip			= ip;
	entry->parent_ip		= parent_ip;

	trace_nowake_buffer_unlock_commit(event, flags, pc);

 out:
	atomic_dec(&per_cpu(test_event_disable, cpu));
	ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_ops __initdata  =
{
	.func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
	register_ftrace_function(&trace_ops);
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{

	event_trace_self_tests();

	event_trace_self_test_with_function();

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif