/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#define pr_fmt(fmt) fmt

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/tracefs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <trace/events/sched.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_generic_fields);
static LIST_HEAD(ftrace_common_fields);

#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)

static struct kmem_cache *field_cachep;
static struct kmem_cache *file_cachep;
static inline int system_refcount(struct event_subsystem *system)
{
	return system->ref_count;
}

static int system_refcount_inc(struct event_subsystem *system)
{
	return system->ref_count++;
}

static int system_refcount_dec(struct event_subsystem *system)
{
	return --system->ref_count;
}

/* Double loops, do not use break, only goto's work */
#define do_for_each_event_file(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		struct trace_event_file *___n;				\
		list_for_each_entry_safe(file, ___n, &tr->events, list)

#define while_for_each_event_file()		\
	}

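/*
 * Illustrative use of the helpers above (a sketch; see event_remove()
 * further down for a real caller):
 *
 *	do_for_each_event_file(tr, file) {
 *		if (file->event_call != call)
 *			continue;
 *		...
 *		break;	(a break only skips to the next trace_array)
 *	} while_for_each_event_file();
 */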
static struct list_head *
trace_get_fields(struct trace_event_call *event_call)
{
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);
}

static struct ftrace_event_field *
__find_event_field(struct list_head *head, char *name)
{
	struct ftrace_event_field *field;

	list_for_each_entry(field, head, link) {
		if (!strcmp(field->name, name))
			return field;
	}

	return NULL;
}

struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name)
{
	struct ftrace_event_field *field;
	struct list_head *head;

	head = trace_get_fields(call);
	field = __find_event_field(head, name);
	if (field)
		return field;

	field = __find_event_field(&ftrace_generic_fields, name);
	if (field)
		return field;

	return __find_event_field(&ftrace_common_fields, name);
}

static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct ftrace_event_field *field;

	field = kmem_cache_alloc(field_cachep, GFP_TRACE);
	if (!field)
		return -ENOMEM;

	field->name = name;
	field->type = type;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, head);

	return 0;
}

int trace_define_field(struct trace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);
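/*
 * Illustrative call (a sketch; "struct my_entry" and its "pid" member
 * are made up for the example):
 *
 *	ret = trace_define_field(call, "pid_t", "pid",
 *				 offsetof(struct my_entry, pid),
 *				 sizeof(pid_t), is_signed_type(pid_t),
 *				 FILTER_OTHER);
 */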

#define __generic_field(type, item, filter_type)			\
	ret = __trace_define_field(&ftrace_generic_fields, #type,	\
				   #item, 0, 0, is_signed_type(type),	\
				   filter_type);			\
	if (ret)							\
		return ret;

#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

static int trace_define_generic_fields(void)
{
	int ret;

	__generic_field(int, CPU, FILTER_CPU);
	__generic_field(int, cpu, FILTER_CPU);
	__generic_field(char *, COMM, FILTER_COMM);
	__generic_field(char *, comm, FILTER_COMM);

	return ret;
}

static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);

	return ret;
}

static void trace_destroy_fields(struct trace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kmem_cache_free(field_cachep, field);
	}
}

/*
 * run-time version of trace_event_get_offsets_<call>() that returns the last
 * accessible offset of trace fields excluding __dynamic_array bytes
 */
int trace_event_get_offsets(struct trace_event_call *call)
{
	struct ftrace_event_field *tail;
	struct list_head *head;

	head = trace_get_fields(call);
	/*
	 * head->next points to the last field with the largest offset,
	 * since it was added last by trace_define_field()
	 */
	tail = list_first_entry(head, struct ftrace_event_field, link);
	return tail->offset + tail->size;
}

int trace_event_raw_init(struct trace_event_call *call)
{
	int id;

	id = register_trace_event(&call->event);
	if (!id)
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

bool trace_event_ignore_this_pid(struct trace_event_file *trace_file)
{
	struct trace_array *tr = trace_file->tr;
	struct trace_array_cpu *data;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	if (!pid_list)
		return false;

	data = this_cpu_ptr(tr->trace_buffer.data);

	return data->ignore_pid;
}
EXPORT_SYMBOL_GPL(trace_event_ignore_this_pid);

void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
				 struct trace_event_file *trace_file,
				 unsigned long len)
{
	struct trace_event_call *event_call = trace_file->event_call;

	if ((trace_file->flags & EVENT_FILE_FL_PID_FILTER) &&
	    trace_event_ignore_this_pid(trace_file))
		return NULL;

	local_save_flags(fbuffer->flags);
	fbuffer->pc = preempt_count();
	/*
	 * If CONFIG_PREEMPT is enabled, then the tracepoint itself disables
	 * preemption (adding one to the preempt_count). Since we are
	 * interested in the preempt_count at the time the tracepoint was
	 * hit, we need to subtract one to offset the increment.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT))
		fbuffer->pc--;
	fbuffer->trace_file = trace_file;

	fbuffer->event =
		trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file,
						event_call->event.type, len,
						fbuffer->flags, fbuffer->pc);
	if (!fbuffer->event)
		return NULL;

	fbuffer->entry = ring_buffer_event_data(fbuffer->event);
	return fbuffer->entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_reserve);

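/*
 * Rough shape of a caller, similar to what the TRACE_EVENT() generated
 * probes do (a sketch; "struct my_entry" is made up):
 *
 *	struct trace_event_buffer fbuffer;
 *	struct my_entry *entry;
 *
 *	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
 *					   sizeof(*entry));
 *	if (!entry)
 *		return;
 *	entry->pid = current->pid;
 *	trace_event_buffer_commit(&fbuffer);
 */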
int trace_event_reg(struct trace_event_call *call,
		    enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->probe,
						 file);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->probe,
					    file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->perf_probe,
					    call);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_reg);

void trace_event_enable_cmd_record(bool enable)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	mutex_lock(&event_mutex);
	do_for_each_event_file(tr, file) {

		if (!(file->flags & EVENT_FILE_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
		} else {
			tracing_stop_cmdline_record();
			clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
		}
	} while_for_each_event_file();
	mutex_unlock(&event_mutex);
}

static int __ftrace_event_enable_disable(struct trace_event_file *file,
					 int enable, int soft_disable)
{
	struct trace_event_call *call = file->event_call;
	struct trace_array *tr = file->tr;
	unsigned long file_flags = file->flags;
	int ret = 0;
	int disable;

	switch (enable) {
	case 0:
		/*
		 * When soft_disable is set and enable is cleared, the sm_ref
		 * reference counter is decremented. If it reaches 0, we want
		 * to clear the SOFT_DISABLED flag but leave the event in the
		 * state that it was. That is, if the event was enabled and
		 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
		 * is set we do not want the event to be enabled before we
		 * clear the bit.
		 *
		 * When soft_disable is not set but the SOFT_MODE flag is,
		 * we do nothing. Do not disable the tracepoint, otherwise
		 * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
		 */
		if (soft_disable) {
			if (atomic_dec_return(&file->sm_ref) > 0)
				break;
			disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
			clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
		} else
			disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE);

		if (disable && (file->flags & EVENT_FILE_FL_ENABLED)) {
			clear_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);
			if (file->flags & EVENT_FILE_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
			}
			call->class->reg(call, TRACE_REG_UNREGISTER, file);
		}
		/* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */
		if (file->flags & EVENT_FILE_FL_SOFT_MODE)
			set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		break;
	case 1:
		/*
		 * When soft_disable is set and enable is set, we want to
		 * register the tracepoint for the event, but leave the event
		 * as is. That means, if the event was already enabled, we do
		 * nothing (but set SOFT_MODE). If the event is disabled, we
		 * set SOFT_DISABLED before enabling the event tracepoint, so
		 * it still seems to be disabled.
		 */
		if (!soft_disable)
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else {
			if (atomic_inc_return(&file->sm_ref) > 1)
				break;
			set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
		}

		if (!(file->flags & EVENT_FILE_FL_ENABLED)) {

			/* Keep the event disabled, when going to SOFT_MODE. */
			if (soft_disable)
				set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);

			if (tr->trace_flags & TRACE_ITER_RECORD_CMD) {
				tracing_start_cmdline_record();
				set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
			}
			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
			if (ret) {
				tracing_stop_cmdline_record();
				pr_info("event trace: Could not enable event %s\n",
					trace_event_name(call));
				break;
			}
			set_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);

			/* WAS_ENABLED gets set but never cleared. */
			call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
		}
		break;
	}

	/* Enable or disable use of trace_buffered_event */
	if ((file_flags & EVENT_FILE_FL_SOFT_DISABLED) !=
	    (file->flags & EVENT_FILE_FL_SOFT_DISABLED)) {
		if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
			trace_buffered_event_enable();
		else
			trace_buffered_event_disable();
	}

	return ret;
}

int trace_event_enable_disable(struct trace_event_file *file,
			       int enable, int soft_disable)
{
	return __ftrace_event_enable_disable(file, enable, soft_disable);
}

static int ftrace_event_enable_disable(struct trace_event_file *file,
				       int enable)
{
	return __ftrace_event_enable_disable(file, enable, 0);
}

static void ftrace_clear_events(struct trace_array *tr)
{
	struct trace_event_file *file;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		ftrace_event_enable_disable(file, 0);
	}
	mutex_unlock(&event_mutex);
}

static void
event_filter_pid_sched_process_exit(void *data, struct task_struct *task)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = data;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	trace_filter_add_remove_task(pid_list, NULL, task);
}

static void
event_filter_pid_sched_process_fork(void *data,
				    struct task_struct *self,
				    struct task_struct *task)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = data;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	trace_filter_add_remove_task(pid_list, self, task);
}

void trace_event_follow_fork(struct trace_array *tr, bool enable)
{
	if (enable) {
		register_trace_prio_sched_process_fork(event_filter_pid_sched_process_fork,
						       tr, INT_MIN);
		register_trace_prio_sched_process_exit(event_filter_pid_sched_process_exit,
						       tr, INT_MAX);
	} else {
		unregister_trace_sched_process_fork(event_filter_pid_sched_process_fork,
						    tr);
		unregister_trace_sched_process_exit(event_filter_pid_sched_process_exit,
						    tr);
	}
}

static void
event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
		    struct task_struct *prev, struct task_struct *next)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_sched(tr->filtered_pids);

	this_cpu_write(tr->trace_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, prev) &&
		       trace_ignore_this_task(pid_list, next));
}

static void
event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
		    struct task_struct *prev, struct task_struct *next)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_sched(tr->filtered_pids);

	this_cpu_write(tr->trace_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, next));
}

static void
event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	/* Nothing to do if we are already tracing */
	if (!this_cpu_read(tr->trace_buffer.data->ignore_pid))
		return;

	pid_list = rcu_dereference_sched(tr->filtered_pids);

	this_cpu_write(tr->trace_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, task));
}

static void
event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	/* Nothing to do if we are not tracing */
	if (this_cpu_read(tr->trace_buffer.data->ignore_pid))
		return;

	pid_list = rcu_dereference_sched(tr->filtered_pids);

	/* Set tracing if current is enabled */
	this_cpu_write(tr->trace_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, current));
}

static void __ftrace_clear_event_pids(struct trace_array *tr)
{
	struct trace_pid_list *pid_list;
	struct trace_event_file *file;
	int cpu;

	pid_list = rcu_dereference_protected(tr->filtered_pids,
					     lockdep_is_held(&event_mutex));
	if (!pid_list)
		return;

	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr);
	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr);

	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr);

	unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, tr);

	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr);

	list_for_each_entry(file, &tr->events, list) {
		clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
	}

	for_each_possible_cpu(cpu)
		per_cpu_ptr(tr->trace_buffer.data, cpu)->ignore_pid = false;

	rcu_assign_pointer(tr->filtered_pids, NULL);

	/* Wait till all users are no longer using pid filtering */
	synchronize_sched();

	trace_free_pid_list(pid_list);
}

static void ftrace_clear_event_pids(struct trace_array *tr)
{
	mutex_lock(&event_mutex);
	__ftrace_clear_event_pids(tr);
	mutex_unlock(&event_mutex);
}

static void __put_system(struct event_subsystem *system)
{
	struct event_filter *filter = system->filter;

	WARN_ON_ONCE(system_refcount(system) == 0);
	if (system_refcount_dec(system))
		return;

	list_del(&system->list);

	if (filter) {
		kfree(filter->filter_string);
		kfree(filter);
	}
	kfree_const(system->name);
	kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
	WARN_ON_ONCE(system_refcount(system) == 0);
	system_refcount_inc(system);
}

static void __get_system_dir(struct trace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	dir->ref_count++;
	__get_system(dir->subsystem);
}

static void __put_system_dir(struct trace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	/* If the subsystem is about to be freed, the dir must be too */
	WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);

	__put_system(dir->subsystem);
	if (!--dir->ref_count)
		kfree(dir);
}

static void put_system(struct trace_subsystem_dir *dir)
{
	mutex_lock(&event_mutex);
	__put_system_dir(dir);
	mutex_unlock(&event_mutex);
}

static void remove_subsystem(struct trace_subsystem_dir *dir)
{
	if (!dir)
		return;

	if (!--dir->nr_events) {
		tracefs_remove_recursive(dir->entry);
		list_del(&dir->list);
		__put_system_dir(dir);
	}
}

static void remove_event_file_dir(struct trace_event_file *file)
{
	struct dentry *dir = file->dir;
	struct dentry *child;

	if (dir) {
		spin_lock(&dir->d_lock);	/* probably unneeded */
		list_for_each_entry(child, &dir->d_subdirs, d_child) {
			if (d_really_is_positive(child))	/* probably unneeded */
				d_inode(child)->i_private = NULL;
		}
		spin_unlock(&dir->d_lock);

		tracefs_remove_recursive(dir);
	}

	list_del(&file->list);
	remove_subsystem(file->system);
	free_event_filter(file->filter);
	kmem_cache_free(file_cachep, file);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int
__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
			      const char *sub, const char *event, int set)
{
	struct trace_event_file *file;
	struct trace_event_call *call;
	const char *name;
	int ret = -EINVAL;
	int eret = 0;

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;
		name = trace_event_name(call);

		if (!name || !call->class || !call->class->reg)
			continue;

		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
			continue;

		if (match &&
		    strcmp(match, name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, name) != 0)
			continue;

		ret = ftrace_event_enable_disable(file, set);

		/*
		 * Save the first error and return that. Some events
		 * may still have been enabled, but let the user
		 * know that something went wrong.
		 */
		if (ret && !eret)
			eret = ret;

		ret = eret;
	}

	return ret;
}

static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
				  const char *sub, const char *event, int set)
{
	int ret;

	mutex_lock(&event_mutex);
	ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
	mutex_unlock(&event_mutex);

	return ret;
}

static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;
	int ret;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	ret = __ftrace_set_clr_event(tr, match, sub, event, set);

	/* Put back the colon to allow this to be called again */
	if (buf)
		*(buf - 1) = ':';

	return ret;
}

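/*
 * For example (illustrative): "sched:sched_switch" parses to
 * sub = "sched", event = "sched_switch"; "sched:" or "sched:*" selects
 * the whole sched subsystem; a bare "sched_switch" matches any event
 * or subsystem of that name.
 */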
/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	struct trace_array *tr = top_trace_array();

	if (!tr)
		return -ENODEV;

	return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);
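/*
 * Illustrative in-kernel usage (a sketch):
 *
 *	trace_set_clr_event("sched", "sched_switch", 1);
 *	...
 *	trace_set_clr_event("sched", "sched_switch", 0);
 */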

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	struct seq_file *m = file->private_data;
	struct trace_array *tr = m->private;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_file *file = v;
	struct trace_event_call *call;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		call = file->event_call;
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->class && call->class->reg &&
		    !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
			return file;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct trace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = t_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_file *file = v;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		if (file->flags & EVENT_FILE_FL_ENABLED)
			return file;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct trace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = s_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static int t_show(struct seq_file *m, void *v)
{
	struct trace_event_file *file = v;
	struct trace_event_call *call = file->event_call;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", trace_event_name(call));

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static void *
p_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->filtered_pids);

	return trace_pid_next(pid_list, v, pos);
}

static void *p_start(struct seq_file *m, loff_t *pos)
	__acquires(RCU)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = m->private;

	/*
	 * Grab the mutex, to keep calls to p_next() having the same
	 * tr->filtered_pids as p_start() has.
	 * If we just passed the tr->filtered_pids around, then RCU would
	 * have been enough, but doing that makes things more complex.
	 */
	mutex_lock(&event_mutex);
	rcu_read_lock_sched();

	pid_list = rcu_dereference_sched(tr->filtered_pids);

	if (!pid_list)
		return NULL;

	return trace_pid_start(pid_list, pos);
}

static void p_stop(struct seq_file *m, void *p)
	__releases(RCU)
{
	rcu_read_unlock_sched();
	mutex_unlock(&event_mutex);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct trace_event_file *file;
	unsigned long flags;
	char buf[4] = "0";

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (likely(file))
		flags = file->flags;
	mutex_unlock(&event_mutex);

	if (!file)
		return -ENODEV;

	if (flags & EVENT_FILE_FL_ENABLED &&
	    !(flags & EVENT_FILE_FL_SOFT_DISABLED))
		strcpy(buf, "1");

	if (flags & EVENT_FILE_FL_SOFT_DISABLED ||
	    flags & EVENT_FILE_FL_SOFT_MODE)
		strcat(buf, "*");

	strcat(buf, "\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_event_file *file;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		ret = -ENODEV;
		mutex_lock(&event_mutex);
		file = event_file_data(filp);
		if (likely(file))
			ret = ftrace_event_enable_disable(file, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}
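/*
 * This pair backs the per-event "enable" file, e.g. (illustrative):
 *
 *	# echo 1 > events/sched/sched_switch/enable
 *	# cat events/sched/sched_switch/enable
 *	1
 *
 * A trailing '*' in the output means the event is in SOFT_MODE (a
 * trigger or other user holds a soft reference to it).
 */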

static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct trace_event_call *call;
	struct trace_event_file *file;
	struct trace_array *tr = dir->tr;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		call = file->event_call;
		if (!trace_event_name(call) || !call->class || !call->class->reg)
			continue;

		if (system && strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * We need to find out if all the events are set,
		 * or if all the events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!(file->flags & EVENT_FILE_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	const char *name = NULL;
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	/*
	 * Opening of "enable" adds a ref count to system,
	 * so the name is safe to use.
	 */
	if (system)
		name = system->name;

	ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}

enum {
	FORMAT_HEADER		= 1,
	FORMAT_FIELD_SEPERATOR	= 2,
	FORMAT_PRINTFMT		= 3,
};

static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_call *call = event_file_data(m->private);
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);
	struct list_head *node = v;

	(*pos)++;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		node = common_head;
		break;

	case FORMAT_FIELD_SEPERATOR:
		node = head;
		break;

	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}

	node = node->prev;
	if (node == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
	else if (node == head)
		return (void *)FORMAT_PRINTFMT;
	else
		return node;
}

static int f_show(struct seq_file *m, void *v)
{
	struct trace_event_call *call = event_file_data(m->private);
	struct ftrace_event_field *field;
	const char *array_descriptor;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		seq_printf(m, "name: %s\n", trace_event_name(call));
		seq_printf(m, "ID: %d\n", call->event.type);
		seq_puts(m, "format:\n");
		return 0;

	case FORMAT_FIELD_SEPERATOR:
		seq_putc(m, '\n');
		return 0;

	case FORMAT_PRINTFMT:
		seq_printf(m, "\nprint fmt: %s\n",
			   call->print_fmt);
		return 0;
	}

	field = list_entry(v, struct ftrace_event_field, link);
	/*
	 * Smartly shows the array type (except dynamic array).
	 * Normal:
	 *	field:TYPE VAR
	 * If TYPE := TYPE[LEN], it is shown:
	 *	field:TYPE VAR[LEN]
	 */
	array_descriptor = strchr(field->type, '[');

	if (!strncmp(field->type, "__data_loc", 10))
		array_descriptor = NULL;

	if (!array_descriptor)
		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   field->type, field->name, field->offset,
			   field->size, !!field->is_signed);
	else
		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   array_descriptor, field->offset,
			   field->size, !!field->is_signed);

	return 0;
}

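/*
 * Together, f_start()/f_next()/f_show() render the per-event "format"
 * file, roughly (illustrative output; fields and the id vary per event):
 *
 *	name: sched_switch
 *	ID: <event id>
 *	format:
 *		field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *		...
 *
 *	print fmt: "prev_comm=%s prev_pid=%d ..."
 */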
static void *f_start(struct seq_file *m, loff_t *pos)
{
	void *p = (void *)FORMAT_HEADER;
	loff_t l = 0;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	if (!event_file_data(m->private))
		return ERR_PTR(-ENODEV);

	while (l < *pos && p)
		p = f_next(m, p, &l);

	return p;
}

static void f_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static const struct seq_operations trace_format_seq_ops = {
	.start		= f_start,
	.next		= f_next,
	.stop		= f_stop,
	.show		= f_show,
};

static int trace_format_open(struct inode *inode, struct file *file)
{
	struct seq_file *m;
	int ret;

	ret = seq_open(file, &trace_format_seq_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;
	m->private = file;

	return 0;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int id = (long)event_file_data(filp);
	char buf[32];
	int len;

	if (*ppos)
		return 0;

	if (unlikely(!id))
		return -ENODEV;

	len = sprintf(buf, "%d\n", id);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct trace_event_file *file;
	struct trace_seq *s;
	int r = -ENODEV;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (file)
		print_event_filter(file, s);
	mutex_unlock(&event_mutex);

	if (file)
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    s->buffer, trace_seq_used(s));

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_event_file *file;
	char *buf;
	int err = -ENODEV;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = memdup_user_nul(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (file)
		err = apply_event_filter(file, buf);
	mutex_unlock(&event_mutex);

	kfree(buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

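/*
 * The string handed to apply_event_filter() uses the usual event
 * filter syntax, e.g. (illustrative):
 *
 *	# echo 'prev_comm == "bash" && next_pid > 1000' > \
 *		events/sched/sched_switch/filter
 */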
static LIST_HEAD(event_subsystems);

static int subsystem_open(struct inode *inode, struct file *filp)
{
	struct event_subsystem *system = NULL;
	struct trace_subsystem_dir *dir = NULL; /* Initialize for gcc */
	struct trace_array *tr;
	int ret;

	if (tracing_is_disabled())
		return -ENODEV;

	/* Make sure the system still exists */
	mutex_lock(&trace_types_lock);
	mutex_lock(&event_mutex);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		list_for_each_entry(dir, &tr->systems, list) {
			if (dir == inode->i_private) {
				/* Don't open systems with no events */
				if (dir->nr_events) {
					__get_system_dir(dir);
					system = dir->subsystem;
				}
				goto exit_loop;
			}
		}
	}
 exit_loop:
	mutex_unlock(&event_mutex);
	mutex_unlock(&trace_types_lock);

	if (!system)
		return -ENODEV;

	/* Some versions of gcc think dir can be uninitialized here */
	WARN_ON(!dir);

	/* Still need to increment the ref count of the system */
	if (trace_array_get(tr) < 0) {
		put_system(dir);
		return -ENODEV;
	}

	ret = tracing_open_generic(inode, filp);
	if (ret < 0) {
		trace_array_put(tr);
		put_system(dir);
	}

	return ret;
}

static int system_tr_open(struct inode *inode, struct file *filp)
{
	struct trace_subsystem_dir *dir;
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_is_disabled())
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* Make a temporary dir that has no system but points to tr */
	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	dir->tr = tr;

	ret = tracing_open_generic(inode, filp);
	if (ret < 0) {
		trace_array_put(tr);
		kfree(dir);
		return ret;
	}

	filp->private_data = dir;

	return 0;
}

static int subsystem_release(struct inode *inode, struct file *file)
{
	struct trace_subsystem_dir *dir = file->private_data;

	trace_array_put(dir->tr);

	/*
	 * If dir->subsystem is NULL, then this is a temporary
	 * descriptor that was made for a trace_array to enable
	 * all subsystems.
	 */
	if (dir->subsystem)
		put_system(dir);
	else
		kfree(dir);

	return 0;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, trace_seq_used(s));

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct trace_subsystem_dir *dir = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = memdup_user_nul(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	err = apply_subsystem_event_filter(dir, buf);
	kfree(buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, trace_seq_used(s));

	kfree(s);

	return r;
}

static void ignore_task_cpu(void *data)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	/*
	 * This function is called by on_each_cpu() while the
	 * event_mutex is held.
	 */
	pid_list = rcu_dereference_protected(tr->filtered_pids,
					     mutex_is_locked(&event_mutex));

	this_cpu_write(tr->trace_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, current));
}

static ssize_t
ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	struct trace_pid_list *filtered_pids = NULL;
	struct trace_pid_list *pid_list;
	struct trace_event_file *file;
	ssize_t ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	mutex_lock(&event_mutex);

	filtered_pids = rcu_dereference_protected(tr->filtered_pids,
					     lockdep_is_held(&event_mutex));

	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
	if (ret < 0)
		goto out;

	rcu_assign_pointer(tr->filtered_pids, pid_list);

	list_for_each_entry(file, &tr->events, list) {
		set_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
	}

	if (filtered_pids) {
		synchronize_sched();
		trace_free_pid_list(filtered_pids);
	} else if (pid_list) {
		/*
		 * Register a probe that is called before all other probes
		 * to set ignore_pid if next or prev do not match.
		 * Register a probe that is called after all other probes
		 * to only keep ignore_pid set if next pid matches.
		 */
		register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre,
						 tr, INT_MAX);
		register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post,
						 tr, 0);

		register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre,
						 tr, INT_MAX);
		register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
						 tr, 0);

		register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,
						     tr, INT_MAX);
		register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,
						     tr, 0);

		register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,
						 tr, INT_MAX);
		register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,
						 tr, 0);
	}

	/*
	 * Ignoring of pids is done at task switch. But we have to
	 * check for those tasks that are currently running.
	 * Always do this in case a pid was appended or removed.
	 */
	on_each_cpu(ignore_task_cpu, tr, 1);

 out:
	mutex_unlock(&event_mutex);

	if (ret > 0)
		*ppos += ret;

	return ret;
}

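/*
 * This implements writes to the "set_event_pid" file, e.g.
 * (illustrative):
 *
 *	# echo 42 137 > set_event_pid	(trace events only for these pids)
 *	# echo > set_event_pid		(clear the pid filter)
 */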
static int ftrace_event_avail_open(struct inode *inode, struct file *file);
static int ftrace_event_set_open(struct inode *inode, struct file *file);
static int ftrace_event_set_pid_open(struct inode *inode, struct file *file);
static int ftrace_event_release(struct inode *inode, struct file *file);

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_pid_seq_ops = {
	.start = p_start,
	.next = p_next,
	.show = trace_pid_show,
	.stop = p_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_set_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = ftrace_event_release,
};

static const struct file_operations ftrace_set_event_pid_fops = {
	.open = ftrace_event_set_pid_open,
	.read = seq_read,
	.write = ftrace_event_pid_write,
	.llseek = seq_lseek,
	.release = ftrace_event_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = trace_format_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_event_id_fops = {
	.read = event_id_read,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = subsystem_open,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = subsystem_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_tr_enable_fops = {
	.open = system_tr_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
	.llseek = default_llseek,
};

static int
ftrace_event_open(struct inode *inode, struct file *file,
		  const struct seq_operations *seq_ops)
{
	struct seq_file *m;
	int ret;

	ret = seq_open(file, seq_ops);
	if (ret < 0)
		return ret;
	m = file->private_data;
	/* copy tr over to seq ops */
	m->private = inode->i_private;

	return ret;
}

static int ftrace_event_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return seq_release(inode, file);
}

static int
ftrace_event_avail_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops = &show_event_seq_ops;

	return ftrace_event_open(inode, file, seq_ops);
}

static int
ftrace_event_set_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops = &show_set_event_seq_ops;
	struct trace_array *tr = inode->i_private;
	int ret;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events(tr);

	ret = ftrace_event_open(inode, file, seq_ops);
	if (ret < 0)
		trace_array_put(tr);
	return ret;
}

static int
ftrace_event_set_pid_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops = &show_set_pid_seq_ops;
	struct trace_array *tr = inode->i_private;
	int ret;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_event_pids(tr);

	ret = ftrace_event_open(inode, file, seq_ops);
	if (ret < 0)
		trace_array_put(tr);
	return ret;
}

static struct event_subsystem *
create_new_subsystem(const char *name)
{
	struct event_subsystem *system;

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system)
		return NULL;

	system->ref_count = 1;

	/* Only allocate if dynamic (kprobes and modules) */
	system->name = kstrdup_const(name, GFP_KERNEL);
	if (!system->name)
		goto out_free;

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter)
		goto out_free;

	list_add(&system->list, &event_subsystems);

	return system;

 out_free:
	kfree_const(system->name);
	kfree(system);
	return NULL;
}

static struct dentry *
event_subsystem_dir(struct trace_array *tr, const char *name,
		    struct trace_event_file *file, struct dentry *parent)
{
	struct trace_subsystem_dir *dir;
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(dir, &tr->systems, list) {
		system = dir->subsystem;
		if (strcmp(system->name, name) == 0) {
			dir->nr_events++;
			file->system = dir;
			return dir->entry;
		}
	}

	/* Now see if the system itself exists. */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			break;
	}
	/* Reset system variable when not found */
	if (&system->list == &event_subsystems)
		system = NULL;

	dir = kmalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		goto out_fail;

	if (!system) {
		system = create_new_subsystem(name);
		if (!system)
			goto out_free;
	} else
		__get_system(system);

	dir->entry = tracefs_create_dir(name, parent);
	if (!dir->entry) {
		pr_warn("Failed to create system directory %s\n", name);
		__put_system(system);
		goto out_free;
	}

	dir->tr = tr;
	dir->ref_count = 1;
	dir->nr_events = 1;
	dir->subsystem = system;
	file->system = dir;

	entry = tracefs_create_file("filter", 0644, dir->entry, dir,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warn("Could not create tracefs '%s/filter' entry\n", name);
	}

	trace_create_file("enable", 0644, dir->entry, dir,
			  &ftrace_system_enable_fops);

	list_add(&dir->list, &tr->systems);

	return dir->entry;

 out_free:
	kfree(dir);
 out_fail:
	/* Only print this message if failed on memory allocation */
	if (!dir || !system)
		pr_warn("No memory to create event subsystem %s\n", name);
	return NULL;
}

static int
event_create_dir(struct dentry *parent, struct trace_event_file *file)
{
	struct trace_event_call *call = file->event_call;
	struct trace_array *tr = file->tr;
	struct list_head *head;
	struct dentry *d_events;
	const char *name;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
		d_events = event_subsystem_dir(tr, call->class->system, file, parent);
		if (!d_events)
			return -ENOMEM;
	} else
		d_events = parent;

	name = trace_event_name(call);
	file->dir = tracefs_create_dir(name, d_events);
	if (!file->dir) {
		pr_warn("Could not create tracefs '%s' directory\n", name);
		return -1;
	}

	if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
		trace_create_file("enable", 0644, file->dir, file,
				  &ftrace_enable_fops);

#ifdef CONFIG_PERF_EVENTS
	if (call->event.type && call->class->reg)
		trace_create_file("id", 0444, file->dir,
				  (void *)(long)call->event.type,
				  &ftrace_event_id_fops);
#endif

	/*
	 * Other events may have the same class. Only update
	 * the fields if they are not already defined.
	 */
	head = trace_get_fields(call);
	if (list_empty(head)) {
		ret = call->class->define_fields(call);
		if (ret < 0) {
			pr_warn("Could not initialize trace point events/%s\n",
				name);
			return -1;
		}
	}
	trace_create_file("filter", 0644, file->dir, file,
			  &ftrace_event_filter_fops);

	/*
	 * Only event directories that can be enabled should have
	 * triggers.
	 */
	if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
		trace_create_file("trigger", 0644, file->dir, file,
				  &event_trigger_fops);

#ifdef CONFIG_HIST_TRIGGERS
	trace_create_file("hist", 0444, file->dir, file,
			  &event_hist_fops);
#endif
	trace_create_file("format", 0444, file->dir, call,
			  &ftrace_event_format_fops);

	return 0;
}

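/*
 * The resulting per-event layout under tracefs looks roughly like
 * (illustrative; "id", "trigger" and "hist" depend on the config and
 * the event's flags):
 *
 *	events/<system>/<event>/enable
 *	events/<system>/<event>/id
 *	events/<system>/<event>/filter
 *	events/<system>/<event>/trigger
 *	events/<system>/<event>/hist
 *	events/<system>/<event>/format
 */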
static void remove_event_from_tracers(struct trace_event_call *call)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	do_for_each_event_file_safe(tr, file) {
		if (file->event_call != call)
			continue;

		remove_event_file_dir(file);
		/*
		 * The do_for_each_event_file_safe() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();
}

static void event_remove(struct trace_event_call *call)
{
	struct trace_array *tr;
	struct trace_event_file *file;

	do_for_each_event_file(tr, file) {
		if (file->event_call != call)
			continue;
		ftrace_event_enable_disable(file, 0);
		/*
		 * The do_for_each_event_file() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();

	if (call->event.funcs)
		__unregister_trace_event(&call->event);
	remove_event_from_tracers(call);
	list_del(&call->list);
}

static int event_init(struct trace_event_call *call)
{
	int ret = 0;
	const char *name;

	name = trace_event_name(call);
	if (WARN_ON(!name))
		return -EINVAL;

	if (call->class->raw_init) {
		ret = call->class->raw_init(call);
		if (ret < 0 && ret != -ENOSYS)
			pr_warn("Could not initialize trace events/%s\n", name);
	}

	return ret;
}

static int
__register_event(struct trace_event_call *call, struct module *mod)
{
	int ret;

	ret = event_init(call);
	if (ret < 0)
		return ret;

	list_add(&call->list, &ftrace_events);
	call->mod = mod;

	return 0;
}

static char *enum_replace(char *ptr, struct trace_enum_map *map, int len)
{
	int rlen;
	int elen;

	/* Find the length of the enum value as a string */
	elen = snprintf(ptr, 0, "%ld", map->enum_value);
	/* Make sure there's enough room to replace the string with the value */
	if (len < elen)
		return NULL;

	snprintf(ptr, elen + 1, "%ld", map->enum_value);

	/* Get the rest of the string of ptr */
	rlen = strlen(ptr + len);
	memmove(ptr + elen, ptr + len, rlen);
	/* Make sure we end the new string */
	ptr[elen + rlen] = 0;

	return ptr + elen;
}
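
/*
 * A minimal sketch of the in-place rewrite enum_replace() performs,
 * assuming a hypothetical map of { enum_string = "MY_FLAG",
 * enum_value = 4 } and @ptr pointing at the 'M':
 *
 *   before: "flags=%d", REC->x == MY_FLAG
 *   after:  "flags=%d", REC->x == 4
 *
 * The tail of the format is memmove()d left over the enum name and
 * the returned pointer lands just past the written "4".
 */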

static void update_event_printk(struct trace_event_call *call,
				struct trace_enum_map *map)
{
	char *ptr;
	int quote = 0;
	int len = strlen(map->enum_string);

	for (ptr = call->print_fmt; *ptr; ptr++) {
		if (*ptr == '\\') {
			ptr++;
			/* paranoid */
			if (!*ptr)
				break;
			continue;
		}
		if (*ptr == '"') {
			quote ^= 1;
			continue;
		}
		if (quote)
			continue;
		if (isdigit(*ptr)) {
			/* skip numbers */
			do {
				ptr++;
				/* Check for alpha chars like ULL */
			} while (isalnum(*ptr));
			if (!*ptr)
				break;
			/*
			 * A number must have some kind of delimiter after
			 * it, and we can ignore that too.
			 */
			continue;
		}
		if (isalpha(*ptr) || *ptr == '_') {
			if (strncmp(map->enum_string, ptr, len) == 0 &&
			    !isalnum(ptr[len]) && ptr[len] != '_') {
				ptr = enum_replace(ptr, map, len);
				/* Hmm, enum string smaller than value */
				if (WARN_ON_ONCE(!ptr))
					return;
				/*
				 * No need to decrement here, as enum_replace()
				 * returns the pointer to the character past
				 * the enum, and two enums cannot be placed
				 * back to back without something in between.
				 * We can skip that something in between.
				 */
				continue;
			}
		skip_more:
			do {
				ptr++;
			} while (isalnum(*ptr) || *ptr == '_');
			if (!*ptr)
				break;
			/*
			 * If what comes after this variable is a '.' or
			 * '->' then we can continue to ignore that string.
			 */
			if (*ptr == '.' || (ptr[0] == '-' && ptr[1] == '>')) {
				ptr += *ptr == '.' ? 1 : 2;
				if (!*ptr)
					break;
				goto skip_more;
			}
			/*
			 * Once again, we can skip the delimiter that came
			 * after the string.
			 */
			continue;
		}
	}
}
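
/*
 * A short sketch of what the scanner above does and does not touch,
 * assuming a hypothetical map of { "MODE_ON" -> 1 }:
 *
 *   "mode=%s", REC->mode == MODE_ON ? "on" : "off"
 *
 * The quoted format, the numeric literals, and the REC->mode chain
 * (an identifier followed by "->") are all skipped; only the bare
 * MODE_ON identifier is matched and replaced with "1".
 */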

void trace_event_enum_update(struct trace_enum_map **map, int len)
{
	struct trace_event_call *call, *p;
	const char *last_system = NULL;
	int last_i;
	int i;

	down_write(&trace_event_sem);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		/* events are usually grouped together with systems */
		if (!last_system || call->class->system != last_system) {
			last_i = 0;
			last_system = call->class->system;
		}

		for (i = last_i; i < len; i++) {
			if (call->class->system == map[i]->system) {
				/* Save the first system if need be */
				if (!last_i)
					last_i = i;
				update_event_printk(call, map[i]);
			}
		}
	}
	up_write(&trace_event_sem);
}

static struct trace_event_file *
trace_create_new_event(struct trace_event_call *call,
		       struct trace_array *tr)
{
	struct trace_event_file *file;

	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
	if (!file)
		return NULL;

	file->event_call = call;
	file->tr = tr;
	atomic_set(&file->sm_ref, 0);
	atomic_set(&file->tm_ref, 0);
	INIT_LIST_HEAD(&file->triggers);
	list_add(&file->list, &tr->events);

	return file;
}

/* Add an event to a trace directory */
static int
__trace_add_new_event(struct trace_event_call *call, struct trace_array *tr)
{
	struct trace_event_file *file;

	file = trace_create_new_event(call, tr);
	if (!file)
		return -ENOMEM;

	return event_create_dir(tr->event_dir, file);
}

/*
 * Just create a descriptor for early init. A descriptor is required
 * for enabling events at boot. We want to enable events before
 * the filesystem is initialized.
 */
static __init int
__trace_early_add_new_event(struct trace_event_call *call,
			    struct trace_array *tr)
{
	struct trace_event_file *file;

	file = trace_create_new_event(call, tr);
	if (!file)
		return -ENOMEM;

	return 0;
}

struct ftrace_module_file_ops;
static void __add_event_to_tracers(struct trace_event_call *call);

/* Add an additional event_call dynamically */
int trace_add_event_call(struct trace_event_call *call)
{
	int ret;

	mutex_lock(&trace_types_lock);
	mutex_lock(&event_mutex);

	ret = __register_event(call, NULL);
	if (ret >= 0)
		__add_event_to_tracers(call);

	mutex_unlock(&event_mutex);
	mutex_unlock(&trace_types_lock);
	return ret;
}
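
/*
 * A rough caller sketch (my_event_call is hypothetical; dynamic
 * callers such as kprobe events follow this pattern):
 *
 *	ret = trace_add_event_call(&my_event_call);
 *	...
 *	ret = trace_remove_event_call(&my_event_call);
 *
 * The removal path below returns -EBUSY while the event is still
 * enabled or has perf users, so callers must be prepared for their
 * teardown to fail.
 */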

/*
 * Must be called under locking of trace_types_lock, event_mutex and
 * trace_event_sem.
 */
static void __trace_remove_event_call(struct trace_event_call *call)
{
	event_remove(call);
	trace_destroy_fields(call);
	free_event_filter(call->filter);
	call->filter = NULL;
}

static int probe_remove_event_call(struct trace_event_call *call)
{
	struct trace_array *tr;
	struct trace_event_file *file;

#ifdef CONFIG_PERF_EVENTS
	if (call->perf_refcount)
		return -EBUSY;
#endif
	do_for_each_event_file(tr, file) {
		if (file->event_call != call)
			continue;
		/*
		 * We can't rely on the ftrace_event_enable_disable(enable => 0)
		 * that we are going to do; EVENT_FILE_FL_SOFT_MODE can
		 * suppress TRACE_REG_UNREGISTER.
		 */
		if (file->flags & EVENT_FILE_FL_ENABLED)
			return -EBUSY;
		/*
		 * The do_for_each_event_file() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();

	__trace_remove_event_call(call);

	return 0;
}

/* Remove an event_call */
int trace_remove_event_call(struct trace_event_call *call)
{
	int ret;

	mutex_lock(&trace_types_lock);
	mutex_lock(&event_mutex);
	down_write(&trace_event_sem);
	ret = probe_remove_event_call(call);
	up_write(&trace_event_sem);
	mutex_unlock(&event_mutex);
	mutex_unlock(&trace_types_lock);

	return ret;
}

#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

#ifdef CONFIG_MODULES

static void trace_module_add_events(struct module *mod)
{
	struct trace_event_call **call, **start, **end;

	if (!mod->num_trace_events)
		return;

	/* Don't add infrastructure for mods without tracepoints */
	if (trace_module_has_bad_taint(mod)) {
		pr_err("%s: module has bad taint, not creating trace events\n",
		       mod->name);
		return;
	}

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	for_each_event(call, start, end) {
		__register_event(*call, mod);
		__add_event_to_tracers(*call);
	}
}

static void trace_module_remove_events(struct module *mod)
{
	struct trace_event_call *call, *p;
	bool clear_trace = false;

	down_write(&trace_event_sem);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
				clear_trace = true;
			__trace_remove_event_call(call);
		}
	}
	up_write(&trace_event_sem);

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events that were used. The only worry is if
	 * a new module gets loaded, and takes on the same id as the events
	 * of this module. When printing out the buffer, traced events left
	 * over from this module may be passed to the new module events and
	 * unexpected results may occur.
	 */
	if (clear_trace)
		tracing_reset_all_online_cpus();
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&trace_types_lock);
	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);
	mutex_unlock(&trace_types_lock);

	return 0;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 1, /* higher than trace.c module notify */
};
#endif /* CONFIG_MODULES */

/* Create a new event directory structure for a trace directory. */
static void
__trace_add_event_dirs(struct trace_array *tr)
{
	struct trace_event_call *call;
	int ret;

	list_for_each_entry(call, &ftrace_events, list) {
		ret = __trace_add_new_event(call, tr);
		if (ret < 0)
			pr_warn("Could not create directory for event %s\n",
				trace_event_name(call));
	}
}

struct trace_event_file *
find_event_file(struct trace_array *tr, const char *system, const char *event)
{
	struct trace_event_file *file;
	struct trace_event_call *call;
	const char *name;

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;
		name = trace_event_name(call);

		if (!name || !call->class || !call->class->reg)
			continue;

		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
			continue;

		if (strcmp(event, name) == 0 &&
		    strcmp(system, call->class->system) == 0)
			return file;
	}
	return NULL;
}
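
/*
 * For example (event names assumed to exist in @tr):
 *
 *	file = find_event_file(tr, "sched", "sched_switch");
 *
 * returns NULL both for unknown events and for events flagged
 * TRACE_EVENT_FL_IGNORE_ENABLE, which cannot be toggled anyway.
 */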

#ifdef CONFIG_DYNAMIC_FTRACE

/* Avoid typos */
#define ENABLE_EVENT_STR	"enable_event"
#define DISABLE_EVENT_STR	"disable_event"

struct event_probe_data {
	struct trace_event_file	*file;
	unsigned long			count;
	int				ref;
	bool				enable;
};

static void update_event_probe(struct event_probe_data *data)
{
	if (data->enable)
		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
	else
		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
}

static void
event_enable_probe(unsigned long ip, unsigned long parent_ip,
		   struct trace_array *tr, struct ftrace_probe_ops *ops,
		   void **_data)
{
	struct ftrace_func_mapper *mapper = ops->private_data;
	struct event_probe_data *data;
	void **pdata;

	pdata = ftrace_func_mapper_find_ip(mapper, ip);
	if (!pdata || !*pdata)
		return;

	data = *pdata;
	update_event_probe(data);
}

static void
event_enable_count_probe(unsigned long ip, unsigned long parent_ip,
			 struct trace_array *tr, struct ftrace_probe_ops *ops,
			 void **_data)
{
	struct ftrace_func_mapper *mapper = ops->private_data;
	struct event_probe_data *data;
	void **pdata;

	pdata = ftrace_func_mapper_find_ip(mapper, ip);
	if (!pdata || !*pdata)
		return;

	data = *pdata;

	if (!data->count)
		return;

	/* Skip if the event is in a state we want to switch to */
	if (data->enable == !(data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
		return;

	if (data->count != -1)
		(data->count)--;

	update_event_probe(data);
}

static int
event_enable_print(struct seq_file *m, unsigned long ip,
		   struct ftrace_probe_ops *ops, void *_data)
{
	struct ftrace_func_mapper *mapper = ops->private_data;
	struct event_probe_data *data;
	void **pdata;

	pdata = ftrace_func_mapper_find_ip(mapper, ip);

	if (WARN_ON_ONCE(!pdata || !*pdata))
		return 0;

	data = *pdata;

	seq_printf(m, "%ps:", (void *)ip);

	seq_printf(m, "%s:%s:%s",
		   data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
		   data->file->event_call->class->system,
		   trace_event_name(data->file->event_call));

	if (data->count == -1)
		seq_puts(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", data->count);

	return 0;
}

static int
event_enable_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *_data)
{
	struct ftrace_func_mapper *mapper = ops->private_data;
	struct event_probe_data *data = _data;
	int ret;

	ret = ftrace_func_mapper_add_ip(mapper, ip, data);
	if (ret < 0)
		return ret;

	data->ref++;

	return 0;
}

static void
event_enable_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void **_data)
{
	struct ftrace_func_mapper *mapper = ops->private_data;
	struct event_probe_data *data;

	data = ftrace_func_mapper_remove_ip(mapper, ip);

	if (WARN_ON_ONCE(!data))
		return;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		/* Remove the SOFT_MODE flag */
		__ftrace_event_enable_disable(data->file, 0, 1);
		module_put(data->file->event_call->mod);
		kfree(data);
	}
}

static struct ftrace_probe_ops event_enable_probe_ops = {
	.func			= event_enable_probe,
	.print			= event_enable_print,
	.init			= event_enable_init,
	.free			= event_enable_free,
};

static struct ftrace_probe_ops event_enable_count_probe_ops = {
	.func			= event_enable_count_probe,
	.print			= event_enable_print,
	.init			= event_enable_init,
	.free			= event_enable_free,
};

static struct ftrace_probe_ops event_disable_probe_ops = {
	.func			= event_enable_probe,
	.print			= event_enable_print,
	.init			= event_enable_init,
	.free			= event_enable_free,
};

static struct ftrace_probe_ops event_disable_count_probe_ops = {
	.func			= event_enable_count_probe,
	.print			= event_enable_print,
	.init			= event_enable_init,
	.free			= event_enable_free,
};

static int
event_enable_func(struct trace_array *tr, struct ftrace_hash *hash,
		  char *glob, char *cmd, char *param, int enabled)
{
	struct trace_event_file *file;
	struct ftrace_probe_ops *ops;
	struct event_probe_data *data;
	const char *system;
	const char *event;
	char *number;
	bool enable;
	int ret;

	if (!tr)
		return -ENODEV;

	/* hash funcs only work with set_ftrace_filter */
	if (!enabled || !param)
		return -EINVAL;

	system = strsep(&param, ":");
	if (!param)
		return -EINVAL;

	event = strsep(&param, ":");

	mutex_lock(&event_mutex);

	ret = -EINVAL;
	file = find_event_file(tr, system, event);
	if (!file)
		goto out;

	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;

	if (enable)
		ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
	else
		ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;

	if (glob[0] == '!') {
		ret = unregister_ftrace_function_probe_func(glob+1, tr, ops);
		goto out;
	}

	ret = -ENOMEM;

	if (!ops->private_data) {
		ops->private_data = allocate_ftrace_func_mapper();
		if (!ops->private_data)
			goto out;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out;

	data->enable = enable;
	data->count = -1;
	data->file = file;

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	ret = -EINVAL;
	if (!strlen(number))
		goto out_free;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, &data->count);
	if (ret)
		goto out_free;

 out_reg:
	/* Don't let event modules unload while probe registered */
	ret = try_module_get(file->event_call->mod);
	if (!ret) {
		ret = -EBUSY;
		goto out_free;
	}

	ret = __ftrace_event_enable_disable(file, 1, 1);
	if (ret < 0)
		goto out_put;

	ret = register_ftrace_function_probe(glob, tr, ops, data);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_disable;
	} else if (ret < 0)
		goto out_disable;
	/* Just return zero, not the number of enabled functions */
	ret = 0;
 out:
	mutex_unlock(&event_mutex);
	return ret;

 out_disable:
	__ftrace_event_enable_disable(file, 0, 1);
 out_put:
	module_put(file->event_call->mod);
 out_free:
	kfree(data);
	goto out;
}

static struct ftrace_func_command event_enable_cmd = {
	.name			= ENABLE_EVENT_STR,
	.func			= event_enable_func,
};

static struct ftrace_func_command event_disable_cmd = {
	.name			= DISABLE_EVENT_STR,
	.func			= event_enable_func,
};

static __init int register_event_cmds(void)
{
	int ret;

	ret = register_ftrace_command(&event_enable_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_ftrace_command(&event_disable_cmd);
	if (WARN_ON(ret < 0))
		unregister_ftrace_command(&event_enable_cmd);
	return ret;
}
#else
static inline int register_event_cmds(void) { return 0; }
#endif /* CONFIG_DYNAMIC_FTRACE */
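
/*
 * Usage sketch for the enable_event/disable_event commands registered
 * above, matching the ftrace documentation (tracefs mount point
 * assumed):
 *
 *   echo 'schedule:enable_event:sched:sched_switch:5' > \
 *	/sys/kernel/tracing/set_ftrace_filter
 *
 * arms a probe on schedule() that soft-enables sched_switch for the
 * next five hits; omit the count for an unlimited probe, and prefix
 * the function glob with '!' to remove a registered probe again.
 */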

/*
 * The top level array has already had its trace_event_file
 * descriptors created in order to allow for early events to
 * be recorded. This function is called after tracefs has been
 * initialized, and we now have to create the files associated
 * with the events.
 */
static __init void
__trace_early_add_event_dirs(struct trace_array *tr)
{
	struct trace_event_file *file;
	int ret;

	list_for_each_entry(file, &tr->events, list) {
		ret = event_create_dir(tr->event_dir, file);
		if (ret < 0)
			pr_warn("Could not create directory for event %s\n",
				trace_event_name(file->event_call));
	}
}

/*
 * For early boot up, the top trace array needs to have
 * a list of events that can be enabled. This must be done before
 * the filesystem is set up in order to allow events to be traced
 * early.
 */
static __init void
__trace_early_add_events(struct trace_array *tr)
{
	struct trace_event_call *call;
	int ret;

	list_for_each_entry(call, &ftrace_events, list) {
		/* Early boot up should not have any modules loaded */
		if (WARN_ON_ONCE(call->mod))
			continue;

		ret = __trace_early_add_new_event(call, tr);
		if (ret < 0)
			pr_warn("Could not create early event %s\n",
				trace_event_name(call));
	}
}

/* Remove the event directory structure for a trace directory. */
static void
__trace_remove_event_dirs(struct trace_array *tr)
{
	struct trace_event_file *file, *next;

	list_for_each_entry_safe(file, next, &tr->events, list)
		remove_event_file_dir(file);
}

static void __add_event_to_tracers(struct trace_event_call *call)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list)
		__trace_add_new_event(call, tr);
}

extern struct trace_event_call *__start_ftrace_events[];
extern struct trace_event_call *__stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = true;
	tracing_selftest_disabled = true;

	return 1;
}
__setup("trace_event=", setup_trace_event);

/* Expects to have event_mutex held when called */
static int
create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
{
	struct dentry *d_events;
	struct dentry *entry;

	entry = tracefs_create_file("set_event", 0644, parent,
				    tr, &ftrace_set_event_fops);
	if (!entry) {
		pr_warn("Could not create tracefs 'set_event' entry\n");
		return -ENOMEM;
	}

	d_events = tracefs_create_dir("events", parent);
	if (!d_events) {
		pr_warn("Could not create tracefs 'events' directory\n");
		return -ENOMEM;
	}

	entry = trace_create_file("enable", 0644, d_events,
				  tr, &ftrace_tr_enable_fops);
	if (!entry) {
		pr_warn("Could not create tracefs 'enable' entry\n");
		return -ENOMEM;
	}

	/* These are not as crucial, just warn if they are not created */

	entry = tracefs_create_file("set_event_pid", 0644, parent,
				    tr, &ftrace_set_event_pid_fops);
	if (!entry)
		pr_warn("Could not create tracefs 'set_event_pid' entry\n");

	/* ring buffer internal formats */
	entry = trace_create_file("header_page", 0444, d_events,
				  ring_buffer_print_page_header,
				  &ftrace_show_header_fops);
	if (!entry)
		pr_warn("Could not create tracefs 'header_page' entry\n");

	entry = trace_create_file("header_event", 0444, d_events,
				  ring_buffer_print_entry_header,
				  &ftrace_show_header_fops);
	if (!entry)
		pr_warn("Could not create tracefs 'header_event' entry\n");

	tr->event_dir = d_events;

	return 0;
}
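
/*
 * Sketch of the top level layout this creates for an instance (file
 * names as created above, mount point assumed):
 *
 *   set_event		- 0644, in the parent directory
 *   set_event_pid	- 0644, in the parent directory
 *   events/		- the per-system/per-event tree
 *   events/enable	- 0644, toggles every event in the instance
 *   events/header_page, events/header_event - 0444, ring buffer formats
 */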

/**
 * event_trace_add_tracer - add an instance of a trace_array to events
 * @parent: The parent dentry to place the files/directories for events in
 * @tr: The trace array associated with these events
 *
 * When a new instance is created, it needs to set up its events
 * directory, as well as other files associated with events. It also
 * creates the event hierarchy in the @parent/events directory.
 *
 * Returns 0 on success.
 */
int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
{
	int ret;

	mutex_lock(&event_mutex);

	ret = create_event_toplevel_files(parent, tr);
	if (ret)
		goto out_unlock;

	down_write(&trace_event_sem);
	__trace_add_event_dirs(tr);
	up_write(&trace_event_sem);

 out_unlock:
	mutex_unlock(&event_mutex);

	return ret;
}

/*
 * The top trace array already had its file descriptors created.
 * Now the files themselves need to be created.
 */
static __init int
early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
{
	int ret;

	mutex_lock(&event_mutex);

	ret = create_event_toplevel_files(parent, tr);
	if (ret)
		goto out_unlock;

	down_write(&trace_event_sem);
	__trace_early_add_event_dirs(tr);
	up_write(&trace_event_sem);

 out_unlock:
	mutex_unlock(&event_mutex);

	return ret;
}

int event_trace_del_tracer(struct trace_array *tr)
{
	mutex_lock(&event_mutex);

	/* Disable any event triggers and associated soft-disabled events */
	clear_event_triggers(tr);

	/* Clear the pid list */
	__ftrace_clear_event_pids(tr);

	/* Disable any running events */
	__ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);

	/* Access to events is within rcu_read_lock_sched() */
	synchronize_sched();

	down_write(&trace_event_sem);
	__trace_remove_event_dirs(tr);
	tracefs_remove_recursive(tr->event_dir);
	up_write(&trace_event_sem);

	tr->event_dir = NULL;

	mutex_unlock(&event_mutex);

	return 0;
}

static __init int event_trace_memsetup(void)
{
	field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
	file_cachep = KMEM_CACHE(trace_event_file, SLAB_PANIC);
	return 0;
}

static __init void
early_enable_events(struct trace_array *tr, bool disable_first)
{
	char *buf = bootup_event_buf;
	char *token;
	int ret;

	while (true) {
		token = strsep(&buf, ",");

		if (!token)
			break;

		if (*token) {
			/* Restarting syscalls requires that we stop them first */
			if (disable_first)
				ftrace_set_clr_event(tr, token, 0);

			ret = ftrace_set_clr_event(tr, token, 1);
			if (ret)
				pr_warn("Failed to enable trace event: %s\n", token);
		}

		/* Put back the comma to allow this to be called again */
		if (buf)
			*(buf - 1) = ',';
	}
}

static __init int event_trace_enable(void)
{
	struct trace_array *tr = top_trace_array();
	struct trace_event_call **iter, *call;
	int ret;

	if (!tr)
		return -ENODEV;

	for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {

		call = *iter;
		ret = event_init(call);
		if (!ret)
			list_add(&call->list, &ftrace_events);
	}

	/*
	 * We need the top trace array to have a working set of trace
	 * points at early init, before the debug files and directories
	 * are created. Create the file entries now, and attach them
	 * to the actual file dentries later.
	 */
	__trace_early_add_events(tr);

	early_enable_events(tr, false);

	trace_printk_start_comm();

	register_event_cmds();

	register_trigger_cmds();

	return 0;
}

/*
 * event_trace_enable() is called from trace_event_init() first to
 * initialize events and perhaps start any events that are on the
 * command line. Unfortunately, there are some events that will not
 * start this early, like the system call tracepoints that need
 * to set the TIF_SYSCALL_TRACEPOINT flag of pid 1. But event_trace_enable()
 * is called before pid 1 starts, and this flag is never set, making
 * the syscall tracepoint never get reached, but the event is enabled
 * regardless (and not doing anything).
 */
static __init int event_trace_enable_again(void)
{
	struct trace_array *tr;

	tr = top_trace_array();
	if (!tr)
		return -ENODEV;

	early_enable_events(tr, true);

	return 0;
}

early_initcall(event_trace_enable_again);

static __init int event_trace_init(void)
{
	struct trace_array *tr;
	struct dentry *d_tracer;
	struct dentry *entry;
	int ret;

	tr = top_trace_array();
	if (!tr)
		return -ENODEV;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	entry = tracefs_create_file("available_events", 0444, d_tracer,
				    tr, &ftrace_avail_fops);
	if (!entry)
		pr_warn("Could not create tracefs 'available_events' entry\n");

	if (trace_define_generic_fields())
		pr_warn("tracing: Failed to allocate generic fields");

	if (trace_define_common_fields())
		pr_warn("tracing: Failed to allocate common fields");

	ret = early_event_add_tracer(d_tracer, tr);
	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warn("Failed to register trace events module notifier\n");
#endif

	return 0;
}

void __init trace_event_init(void)
{
	event_trace_memsetup();
	init_ftrace_syscalls();
	event_trace_enable();
}

fs_initcall(event_trace_init);

#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct trace_subsystem_dir *dir;
	struct trace_event_file *file;
	struct trace_event_call *call;
	struct event_subsystem *system;
	struct trace_array *tr;
	int ret;

	tr = top_trace_array();
	if (!tr)
		return;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;

		/* Only test those that have a probe */
		if (!call->class || !call->class->probe)
			continue;

/*
 * Testing syscall events here is pretty useless, but
 * we still do it if configured. But this is time consuming.
 * What we really need is a user thread to perform the
 * syscalls as we test.
 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
		if (call->class->system &&
		    strcmp(call->class->system, "syscalls") == 0)
			continue;
#endif

		pr_info("Testing event %s: ", trace_event_name(call));

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (file->flags & EVENT_FILE_FL_ENABLED) {
			pr_warn("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(file, 1);
		event_test_stuff();
		ftrace_event_enable_disable(file, 0);

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(dir, &tr->systems, list) {

		system = dir->subsystem;

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warn("error enabling system %s\n",
				system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret)) {
			pr_warn("error disabling system %s\n",
				system->name);
			continue;
		}

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* reset sysname */
	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

static struct trace_event_file event_trace_file __initdata;

static void __init
function_test_events_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	pc = preempt_count();
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_event_buffer_lock_reserve(&buffer, &event_trace_file,
						TRACE_FN, sizeof(*entry),
						flags, pc);
	if (!event)
		goto out;
	entry	= ring_buffer_event_data(event);
	entry->ip			= ip;
	entry->parent_ip		= parent_ip;

	event_trigger_unlock_commit(&event_trace_file, buffer, event,
				    entry, flags, pc);
 out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __initdata  =
{
	.func = function_test_events_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static __init void event_trace_self_test_with_function(void)
{
	int ret;

	event_trace_file.tr = top_trace_array();
	if (WARN_ON(!event_trace_file.tr))
		return;

	ret = register_ftrace_function(&trace_ops);
	if (WARN_ON(ret < 0)) {
		pr_info("Failed to enable function tracer for event tests\n");
		return;
	}
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();
	}

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif