/* include/trace/ftrace.h — TRACE_EVENT() macro expansion stages */
/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *	struct trace_entry		ent;
 *	<type>				<item>;
 *	<type2>				<item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */

#include <linux/ftrace_event.h>

/*
 * Stage 1 macro overrides: each __field()/__array() becomes a real
 * struct member, while a __dynamic_array() is represented only by a
 * 16-bit offset (__data_loc_<item>) pointing into the trailing
 * variable-length __data[] area of the event record.
 */
#undef __field
#define __field(type, item)		type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) unsigned short __data_loc_##item;

/* a string is just a dynamic array of char; len is computed later */
#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

/*
 * Expand each TRACE_EVENT() into the raw record layout plus a forward
 * declaration of its ftrace_event_call (defined for real in stage 4).
 */
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
	struct ftrace_raw_##name {				\
		struct trace_entry	ent;			\
		tstruct						\
		char			__data[0];		\
	};							\
	static struct ftrace_event_call event_##name

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *	int				<item1>;
 *	int				<item2>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro will create each int <item>, this is
 * to keep the offset of each array from the beginning of the event.
 */

/* static fields contribute nothing to the offsets structure */
#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	int item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
	struct ftrace_data_offsets_##call {				\
		tstruct;						\
	};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field; <-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>.id) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	p = get_cpu_var(ftrace_event_seq);
 *	ret = trace_seq_printf(s, <TP_printk> "\n");
 *	put_cpu();
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

/* resolve a __data_loc offset back to a pointer inside the record */
#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + __entry->__data_loc_##field)

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags flags[] =		\
			{ flag_array, { -1, NULL }};			\
		ftrace_print_flags_seq(p, delim, flag, flags);		\
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		ftrace_print_symbols_seq(p, value, symbols);		\
	})

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
enum print_line_t							\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags)	\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##call *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p;						\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_##call.id) {				\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	p = &get_cpu_var(ftrace_event_seq);				\
	ret = trace_seq_printf(s, #call ": " print);			\
	put_cpu();							\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Setup the showing format of trace point.
 *
 * int
 * ftrace_format_##call(struct trace_seq *s)
 * {
 *	struct ftrace_raw_##call field;
 *	int ret;
 *
 *	ret = trace_seq_printf(s, #type " " #item ";"
 *			       " offset:%u; size:%u;\n",
 *			       offsetof(struct ftrace_raw_##call, item),
 *			       sizeof(field.type));
 *
 * }
 */

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef __field
#define __field(type, item)					\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"	\
			       "offset:%u;\tsize:%u;\n",		\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item));	\
	if (!ret)							\
		return 0;

#undef __array
#define __array(type, item, len)						\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t"	\
			       "offset:%u;\tsize:%u;\n",		\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item));	\
	if (!ret)							\
		return 0;

/* dynamic arrays report the offset/size of their __data_loc member */
#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_seq_printf(s, "\tfield:__data_loc " #item ";\t"	       \
			       "offset:%u;\tsize:%u;\n",		       \
			       (unsigned int)offsetof(typeof(field),	       \
					__data_loc_##item),		       \
			       (unsigned int)sizeof(field.__data_loc_##item)); \
	if (!ret)							       \
		return 0;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

/* in the exported format, entry fields are spelled REC-> */
#undef __entry
#define __entry REC

#undef TP_printk
#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
static int								\
ftrace_format_##call(struct trace_seq *s)				\
{									\
	struct ftrace_raw_##call field __attribute__((unused));		\
	int ret = 0;							\
									\
	tstruct;							\
									\
	trace_seq_printf(s, "\nprint fmt: " print);			\
									\
	return ret;							\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Register each field with the event filtering core so that filter
 * expressions can reference it by name, type, offset and size.
 */
#undef __field
#define __field(type, item)						\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item), is_signed_type(type));	\
	if (ret)							\
		return ret;

#undef __array
#define __array(type, item, len)					\
	BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);				\
	ret = trace_define_field(event_call, #type "[" #len "]", #item,	\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item), 0);		\
	if (ret)							\
		return ret;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_define_field(event_call, "__data_loc" "[" #type "]", #item,\
				offsetof(typeof(field), __data_loc_##item),    \
				 sizeof(field.__data_loc_##item), 0);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
int									\
ftrace_define_fields_##call(void)					\
{									\
	struct ftrace_raw_##call field;					\
	struct ftrace_event_call *event_call = &event_##call;		\
	int ret;							\
									\
	__common_field(int, type, 1);					\
	__common_field(unsigned char, flags, 0);			\
	__common_field(unsigned char, preempt_count, 0);		\
	__common_field(int, pid, 1);					\
	__common_field(int, tgid, 1);					\
									\
	tstruct;							\
									\
	return ret;							\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

/* accumulate this array's offset/length into the running __data_size */
#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_size += (len) * sizeof(type);

/* a string's dynamic length is strlen() + terminating NUL */
#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
static inline int ftrace_get_offsets_##call(				\
	struct ftrace_data_offsets_##call *__data_offsets, proto)       \
{									\
	int __data_size = 0;						\
	struct ftrace_raw_##call __maybe_unused *entry;			\
									\
	tstruct;							\
									\
	return __data_size;						\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * static void ftrace_event_<call>(proto)
 * {
 *	event_trace_printk(_RET_IP_, "<call>: " <fmt>);
 * }
 *
 * static int ftrace_reg_event_<call>(void)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_event_<call>);
 *	if (!ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to  <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_event_<call>);
 * }
 *
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(proto)
 * {
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	unsigned long irq_flags;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	event = trace_current_buffer_lock_reserve(event_<call>.id,
 *				  sizeof(struct ftrace_raw_<call>),
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry	= ring_buffer_event_data(event);
 *
 *	<assign>;  <-- Here we assign the entries by the __field and
 *			__array macros.
 *
 *	trace_current_buffer_unlock_commit(event, irq_flags, pc);
 * }
 *
 * static int ftrace_raw_reg_event_<call>(void)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_raw_event_<call>);
 *	if (!ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_raw_event_<call>);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= ftrace_raw_output_<call>, <-- stage 2
 * };
 *
 * static int ftrace_raw_init_event_<call>(void)
 * {
 *	int id;
 *
 *	id = register_ftrace_event(&ftrace_event_type_<call>);
 *	if (!id)
 *		return -ENODEV;
 *	event_<call>.id = id;
 *	return 0;
 * }
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *	.name			= "<call>",
 *	.system			= "<system>",
 *	.raw_init		= ftrace_raw_init_event_<call>,
 *	.regfunc		= ftrace_reg_event_<call>,
 *	.unregfunc		= ftrace_unreg_event_<call>,
 *	.show_format		= ftrace_format_<call>,
 * }
 *
 */

/* legacy printk-style format helper: append newline, allow empty args */
#undef TP_FMT
#define TP_FMT(fmt, args...)	fmt "\n", ##args

/*
 * Optional perf profiling hooks.  When CONFIG_EVENT_PROFILE is set,
 * each event grows a counter probe plus enable/disable functions that
 * register/unregister it on the 0 -> 1 / 1 -> 0 refcount transitions.
 */
#ifdef CONFIG_EVENT_PROFILE
#define _TRACE_PROFILE(call, proto, args)				\
static void ftrace_profile_##call(proto)				\
{									\
	extern void perf_tpcounter_event(int);				\
	perf_tpcounter_event(event_##call.id);				\
}									\
									\
static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
{									\
	int ret = 0;							\
									\
	if (!atomic_inc_return(&event_call->profile_count))		\
		ret = register_trace_##call(ftrace_profile_##call);	\
									\
	return ret;							\
}									\
									\
static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
{									\
	if (atomic_add_negative(-1, &event_call->profile_count))	\
		unregister_trace_##call(ftrace_profile_##call);		\
}

/* profile_count starts at -1 so the first enable returns 0 (disabled) */
#define _TRACE_PROFILE_INIT(call)					\
	.profile_count = ATOMIC_INIT(-1),				\
	.profile_enable = ftrace_profile_enable_##call,			\
	.profile_disable = ftrace_profile_disable_##call,

#else
#define _TRACE_PROFILE(call, proto, args)
#define _TRACE_PROFILE_INIT(call)
#endif

/*
 * Assignment-stage overrides: inside TP_fast_assign(), __entry refers
 * to the reserved ring-buffer record; dynamic arrays store the offset
 * computed earlier by ftrace_get_offsets_<call>().
 */
#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

/* copy the source string into its reserved slot in __data[] */
#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), src);

/*
 * Stage 4: generate, per event, the raw probe, the register/unregister
 * helpers, the trace_event output binding, the init function and the
 * ftrace_event_call descriptor placed in the _ftrace_events section.
 */
#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args))			\
									\
static struct ftrace_event_call event_##call;				\
									\
static void ftrace_raw_event_##call(proto)				\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_event_call *event_call = &event_##call;		\
	struct ring_buffer_event *event;				\
	struct ftrace_raw_##call *entry;				\
	unsigned long irq_flags;					\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	event = trace_current_buffer_lock_reserve(event_##call.id,	\
				 sizeof(*entry) + __data_size,		\
				 irq_flags, pc);			\
	if (!event)							\
		return;							\
	entry	= ring_buffer_event_data(event);			\
									\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	if (!filter_current_check_discard(event_call, entry, event))	\
		trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \
}									\
									\
static int ftrace_raw_reg_event_##call(void)				\
{									\
	int ret;							\
									\
	ret = register_trace_##call(ftrace_raw_event_##call);		\
	if (ret)							\
		pr_info("event trace: Could not activate trace point "	\
			"probe to " #call "\n");			\
	return ret;							\
}									\
									\
static void ftrace_raw_unreg_event_##call(void)				\
{									\
	unregister_trace_##call(ftrace_raw_event_##call);		\
}									\
									\
static struct trace_event ftrace_event_type_##call = {			\
	.trace			= ftrace_raw_output_##call,		\
};									\
									\
static int ftrace_raw_init_event_##call(void)				\
{									\
	int id;								\
									\
	id = register_ftrace_event(&ftrace_event_type_##call);		\
	if (!id)							\
		return -ENODEV;						\
	event_##call.id = id;						\
	INIT_LIST_HEAD(&event_##call.fields);				\
	init_preds(&event_##call);					\
	return 0;							\
}									\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name			= #call,				\
	.system			= __stringify(TRACE_SYSTEM),		\
	.event			= &ftrace_event_type_##call,		\
	.raw_init		= ftrace_raw_init_event_##call,		\
	.regfunc		= ftrace_raw_reg_event_##call,		\
	.unregfunc		= ftrace_raw_unreg_event_##call,	\
	.show_format		= ftrace_format_##call,			\
	.define_fields		= ftrace_define_fields_##call,		\
	_TRACE_PROFILE_INIT(call)					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef _TRACE_PROFILE
#undef _TRACE_PROFILE_INIT