/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *	struct trace_entry		ent;
 *	<type>				<item>;
 *	<type2>				<item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */
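
/*
 * As an illustration only (this tracepoint is hypothetical, not defined
 * in the kernel), a definition such as:
 *
 *	TRACE_EVENT(foo_bar,
 *		TP_PROTO(const char *name, int value),
 *		TP_ARGS(name, value),
 *		TP_STRUCT__entry(
 *			__string(	name,	name	)
 *			__field(	int,	value	)
 *		),
 *		TP_fast_assign(
 *			__assign_str(name, name);
 *			__entry->value = value;
 *		),
 *		TP_printk("%s value=%d", __get_str(name), __entry->value)
 *	);
 *
 * would produce, at this stage, roughly:
 *
 *	struct ftrace_raw_foo_bar {
 *		struct trace_entry	ent;
 *		u32			__data_loc_name;
 *		int			value;
 *		char			__data[0];
 *	};
 *
 * (__string() only reserves the u32 __data_loc_name here; the string
 * itself is stored later in the __data[] area.)
 */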

#include <linux/ftrace_event.h>

/*
 * DECLARE_EVENT_CLASS can be used to add a generic function
 * handler for events. That is, if all events have the same
 * parameters and just have distinct trace points.
 * Each tracepoint can be defined with DEFINE_EVENT and that
 * will map the DECLARE_EVENT_CLASS to the tracepoint.
 *
 * TRACE_EVENT is a one-to-one mapping between tracepoint and template.
 */
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
	DECLARE_EVENT_CLASS(name,			       \
			     PARAMS(proto),		       \
			     PARAMS(args),		       \
			     PARAMS(tstruct),		       \
			     PARAMS(assign),		       \
			     PARAMS(print));		       \
	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));


#undef __field
#define __field(type, item)		type	item;

#undef __field_ext
#define __field_ext(type, item, filter_type)	type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)	\
	struct ftrace_raw_##name {					\
		struct trace_entry	ent;				\
		tstruct							\
		char			__data[0];			\
	};
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)	\
	static struct ftrace_event_call			\
	__attribute__((__aligned__(4))) event_##name

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef __cpparg
#define __cpparg(arg...) arg

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,			\
		assign, print, reg, unreg)				\
	TRACE_EVENT(name, __cpparg(proto), __cpparg(args),		\
		__cpparg(tstruct), __cpparg(assign), __cpparg(print))	\

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)


/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *	u32				<item1>;
 *	u32				<item2>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro creates a u32 <item> for each dynamic
 * array; it holds the offset of that array from the beginning of the
 * event. The size of the array is also encoded, in the upper 16 bits
 * of <item>.
 */
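
/*
 * Continuing the hypothetical foo_bar example from stage 1, this stage
 * would produce roughly:
 *
 *	struct ftrace_data_offsets_foo_bar {
 *		u32	name;
 *	};
 *
 * Only dynamic entries (__string()/__dynamic_array()) contribute a
 * member here; plain __field()s and __array()s do not.
 */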

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
	struct ftrace_data_offsets_##call {				\
		tstruct;						\
	};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field; <-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>.id) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	p = &get_cpu_var(ftrace_event_seq);
 *	trace_seq_init(p);
 *	ret = trace_seq_printf(s, <TP_printk> "\n");
 *	put_cpu();
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags __flags[] =	\
			{ flag_array, { -1, NULL }};			\
		ftrace_print_flags_seq(p, delim, flag, __flags);	\
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		ftrace_print_symbols_seq(p, value, symbols);		\
	})
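
/*
 * Illustrative use only (the flag and state values below are made up):
 * inside a TP_printk() one might write
 *
 *	TP_printk("flags=%s state=%s",
 *		  __print_flags(__entry->flags, "|",
 *			{ 0x1, "READ" }, { 0x2, "SYNC" }),
 *		  __print_symbolic(__entry->state,
 *			{ 1, "RUNNING" }, { 2, "IDLE" }))
 *
 * The helpers above resolve the names via ftrace_print_flags_seq() and
 * ftrace_print_symbols_seq(), using the per-cpu trace_seq 'p' set up by
 * the generated output function.
 */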

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace enum print_line_t					\
ftrace_raw_output_id_##call(int event_id, const char *name,		\
			    struct trace_iterator *iter, int flags)	\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##call *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p;						\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_id) {					\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	p = &get_cpu_var(ftrace_event_seq);				\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, "%s: ", name);			\
	if (ret)							\
		ret = trace_seq_printf(s, print);			\
	put_cpu();							\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)			\
static notrace enum print_line_t					\
ftrace_raw_output_##name(struct trace_iterator *iter, int flags)	\
{									\
	return ftrace_raw_output_id_##template(event_##name.id,		\
					       #name, iter, flags);	\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
static notrace enum print_line_t					\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags)	\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##template *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p;						\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_##call.id) {				\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	p = &get_cpu_var(ftrace_event_seq);				\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, "%s: ", #call);			\
	if (ret)							\
		ret = trace_seq_printf(s, print);			\
	put_cpu();							\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __field_ext
#define __field_ext(type, item, filter_type)				\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), filter_type);	\
	if (ret)							\
		return ret;

#undef __field
#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len)					\
	BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);				\
	ret = trace_define_field(event_call, #type "[" #len "]", #item,	\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item,  \
				 offsetof(typeof(field), __data_loc_##item),   \
				 sizeof(field.__data_loc_##item),	       \
				 is_signed_type(type), FILTER_OTHER);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)	\
static int notrace							\
ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
{									\
	struct ftrace_raw_##call field;					\
	int ret;							\
									\
	tstruct;							\
									\
	return ret;							\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= (len * sizeof(type)) << 16;		\
	__data_size += (len) * sizeof(type);

#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static inline notrace int ftrace_get_offsets_##call(			\
	struct ftrace_data_offsets_##call *__data_offsets, proto)       \
{									\
	int __data_size = 0;						\
	struct ftrace_raw_##call __maybe_unused *entry;			\
									\
	tstruct;							\
									\
	return __data_size;						\
}
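
/*
 * For the hypothetical foo_bar event from the earlier stages, the
 * generated helper would look roughly like (sizeof(char) factored out
 * for readability):
 *
 *	static inline notrace int ftrace_get_offsets_foo_bar(
 *		struct ftrace_data_offsets_foo_bar *__data_offsets,
 *		const char *name, int value)
 *	{
 *		int __data_size = 0;
 *		struct ftrace_raw_foo_bar __maybe_unused *entry;
 *
 *		__data_offsets->name = __data_size +
 *				       offsetof(typeof(*entry), __data);
 *		__data_offsets->name |= (strlen(name) + 1) << 16;
 *		__data_size += strlen(name) + 1;
 *
 *		return __data_size;
 *	}
 */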

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#ifdef CONFIG_PERF_EVENTS

/*
 * Generate the functions needed for tracepoint perf_event support.
 *
 * NOTE: The insertion callback (perf_trace_<call>) is defined later.
 *
 * static int perf_trace_enable_<call>(struct ftrace_event_call *unused)
 * {
 *	return register_trace_<call>(perf_trace_<call>);
 * }
 *
 * static void perf_trace_disable_<call>(struct ftrace_event_call *unused)
 * {
 *	unregister_trace_<call>(perf_trace_<call>);
 * }
 *
 */

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)			\
									\
static void perf_trace_##name(proto);					\
									\
static notrace int							\
perf_trace_enable_##name(struct ftrace_event_call *unused)		\
{									\
	return register_trace_##name(perf_trace_##name);		\
}									\
									\
static notrace void							\
perf_trace_disable_##name(struct ftrace_event_call *unused)		\
{									\
	unregister_trace_##name(perf_trace_##name);			\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#endif /* CONFIG_PERF_EVENTS */

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * static void ftrace_event_<call>(proto)
 * {
 *	event_trace_printk(_RET_IP_, "<call>: " <fmt>);
 * }
 *
 * static int ftrace_reg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	return register_trace_<call>(ftrace_event_<call>);
 * }
 *
 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	unregister_trace_<call>(ftrace_event_<call>);
 * }
 *
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(proto)
 * {
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	struct ring_buffer *buffer;
 *	unsigned long irq_flags;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	event = trace_current_buffer_lock_reserve(&buffer,
 *				  event_<call>.id,
 *				  sizeof(struct ftrace_raw_<call>),
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry	= ring_buffer_event_data(event);
 *
 *	<assign>;  <-- Here we assign the entries by the __field and
 *			__array macros.
 *
 *	trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
 * }
 *
 * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_raw_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	unregister_trace_<call>(ftrace_raw_event_<call>);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= ftrace_raw_output_<call>, <-- stage 2
 * };
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *	.name			= "<call>",
 *	.system			= "<system>",
 *	.raw_init		= trace_event_raw_init,
 *	.regfunc		= ftrace_reg_event_<call>,
 *	.unregfunc		= ftrace_unreg_event_<call>,
 * }
 *
 */

#ifdef CONFIG_PERF_EVENTS

#define _TRACE_PERF_INIT(call)						\
	.perf_event_enable = perf_trace_enable_##call,			\
	.perf_event_disable = perf_trace_disable_##call,

#else
#define _TRACE_PERF_INIT(call)
#endif /* CONFIG_PERF_EVENTS */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), src);
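
/*
 * Note: __assign_str() copies into space that __string() reserved in the
 * event's __data[] area (sized by ftrace_get_offsets_<call>() above);
 * __get_str() finds that space through the __data_loc_<item> offset that
 * __dynamic_array() assigns just above.
 */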

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
									\
static notrace void							\
ftrace_raw_event_id_##call(struct ftrace_event_call *event_call,	\
				       proto)				\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ring_buffer_event *event;				\
	struct ftrace_raw_##call *entry;				\
	struct ring_buffer *buffer;					\
	unsigned long irq_flags;					\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	event = trace_current_buffer_lock_reserve(&buffer,		\
				 event_call->id,			\
				 sizeof(*entry) + __data_size,		\
				 irq_flags, pc);			\
	if (!event)							\
		return;							\
	entry	= ring_buffer_event_data(event);			\
									\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
		trace_nowake_buffer_unlock_commit(buffer,		\
						  event, irq_flags, pc); \
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
									\
static notrace void ftrace_raw_event_##call(proto)			\
{									\
	ftrace_raw_event_id_##template(&event_##call, args);		\
}									\
									\
static notrace int							\
ftrace_raw_reg_event_##call(struct ftrace_event_call *unused)		\
{									\
	return register_trace_##call(ftrace_raw_event_##call);		\
}									\
									\
static notrace void							\
ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused)		\
{									\
	unregister_trace_##call(ftrace_raw_event_##call);		\
}									\
									\
static struct trace_event ftrace_event_type_##call = {			\
	.trace			= ftrace_raw_output_##call,		\
};

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __entry
#define __entry REC

#undef __print_flags
#undef __print_symbolic
#undef __get_dynamic_array
#undef __get_str

#undef TP_printk
#define TP_printk(fmt, args...) "\"" fmt "\", "  __stringify(args)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static const char print_fmt_##call[] = print;

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name			= #call,				\
	.system			= __stringify(TRACE_SYSTEM),		\
	.event			= &ftrace_event_type_##call,		\
	.raw_init		= trace_event_raw_init,			\
	.regfunc		= ftrace_raw_reg_event_##call,		\
	.unregfunc		= ftrace_raw_unreg_event_##call,	\
	.print_fmt		= print_fmt_##template,			\
	.define_fields		= ftrace_define_fields_##template,	\
	_TRACE_PERF_INIT(call)					\
}
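
/*
 * For the hypothetical foo_bar event (defined via TRACE_EVENT, so the
 * class and the event share a name), the object placed in the
 * _ftrace_events section would look roughly like:
 *
 *	static struct ftrace_event_call __used
 *	__attribute__((__aligned__(4)))
 *	__attribute__((section("_ftrace_events"))) event_foo_bar = {
 *		.name		= "foo_bar",
 *		.system		= __stringify(TRACE_SYSTEM),
 *		.event		= &ftrace_event_type_foo_bar,
 *		.raw_init	= trace_event_raw_init,
 *		.regfunc	= ftrace_raw_reg_event_foo_bar,
 *		.unregfunc	= ftrace_raw_unreg_event_foo_bar,
 *		.print_fmt	= print_fmt_foo_bar,
 *		.define_fields	= ftrace_define_fields_foo_bar,
 *	};
 *
 * plus the perf_event enable/disable callbacks when CONFIG_PERF_EVENTS
 * is set (via _TRACE_PERF_INIT()).
 */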

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
									\
static const char print_fmt_##call[] = print;				\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name			= #call,				\
	.system			= __stringify(TRACE_SYSTEM),		\
	.event			= &ftrace_event_type_##call,		\
	.raw_init		= trace_event_raw_init,			\
	.regfunc		= ftrace_raw_reg_event_##call,		\
	.unregfunc		= ftrace_raw_unreg_event_##call,	\
	.print_fmt		= print_fmt_##call,			\
	.define_fields		= ftrace_define_fields_##template,	\
	_TRACE_PERF_INIT(call)					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Define the insertion callback for perf events
 *
 * The job is very similar to ftrace_raw_event_<call> except that we don't
 * insert in the ring buffer but in a perf counter.
 *
 * static void ftrace_perf_<call>(proto)
 * {
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ftrace_event_call *event_call = &event_<call>;
 *	extern void perf_tp_event(int, u64, u64, void *, int);
 *	struct ftrace_raw_##call *entry;
 *	struct perf_trace_buf *trace_buf;
 *	u64 __addr = 0, __count = 1;
 *	unsigned long irq_flags;
 *	struct trace_entry *ent;
 *	int __entry_size;
 *	int __data_size;
 *	int __cpu;
 *	int pc;
 *
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	// Below we want to get the aligned size by taking into account
 *	// the u32 field that will later store the buffer size
 *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
 *			     sizeof(u64));
 *	__entry_size -= sizeof(u32);
 *
 *	// Protect the non nmi buffer
 *	// This also protects the rcu read side
 *	local_irq_save(irq_flags);
 *	__cpu = smp_processor_id();
 *
 *	if (in_nmi())
 *		trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
 *	else
 *		trace_buf = rcu_dereference_sched(perf_trace_buf);
 *
 *	if (!trace_buf)
 *		goto end;
 *
 *	trace_buf = per_cpu_ptr(trace_buf, __cpu);
 *
 * 	// Avoid recursion from perf that could mess up the buffer
 * 	if (trace_buf->recursion++)
 *		goto end_recursion;
 *
 * 	raw_data = trace_buf->buf;
 *
 *	// Make recursion update visible before entering perf_tp_event
 *	// so that we protect from perf recursions.
 *
 *	barrier();
 *
 *	//zero dead bytes from alignment to avoid stack leak to userspace:
 *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
 *	entry = (struct ftrace_raw_<call> *)raw_data;
 *	ent = &entry->ent;
 *	tracing_generic_entry_update(ent, irq_flags, pc);
 *	ent->type = event_call->id;
 *
 *	<tstruct> <- do some jobs with dynamic arrays
 *
 *	<assign>  <- affect our values
 *
 *	perf_tp_event(event_call->id, __addr, __count, entry,
 *		     __entry_size);  <- submit them to perf counter
 *
 * }
 */

#ifdef CONFIG_PERF_EVENTS

#undef __entry
#define __entry entry

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __perf_addr
#define __perf_addr(a) __addr = (a)

#undef __perf_count
#define __perf_count(c) __count = (c)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace void							\
perf_trace_templ_##call(struct ftrace_event_call *event_call,		\
			    proto)					\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_raw_##call *entry;				\
	u64 __addr = 0, __count = 1;					\
	unsigned long irq_flags;					\
	struct pt_regs *__regs;						\
	int __entry_size;						\
	int __data_size;						\
	int rctx;							\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE,		\
		      "profile buffer not large enough"))		\
		return;							\
	entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare(	\
		__entry_size, event_call->id, &rctx, &irq_flags);	\
	if (!entry)							\
		return;							\
	tstruct								\
									\
	{ assign; }							\
									\
	__regs = &__get_cpu_var(perf_trace_regs);			\
	perf_fetch_caller_regs(__regs, 2);				\
									\
	perf_trace_buf_submit(entry, __entry_size, rctx, __addr,	\
			       __count, irq_flags, __regs);		\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)		\
static notrace void perf_trace_##call(proto)			\
{								\
	struct ftrace_event_call *event_call = &event_##call;	\
								\
	perf_trace_templ_##template(event_call, args);		\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_PERF_EVENTS */

#undef _TRACE_PERF_INIT