#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H

#include <linux/sched.h>
#include <linux/tracepoint.h>

/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
TRACE_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
		__entry->pid	= t->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);

/*
 * Tracepoint for the return value of the kthread stopping:
 */
TRACE_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_STRUCT__entry(
		__field(	int,	ret	)
	),

	TP_fast_assign(
		__entry->ret	= ret;
	),

	TP_printk("ret=%d", __entry->ret)
);
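
/*
 * Illustrative sketch (not part of this header): each TRACE_EVENT()
 * above generates a trace_<name>() call for the kthread code to fire,
 * roughly along these lines (simplified; the real call sites live in
 * kernel/kthread.c):
 *
 *	int kthread_stop(struct task_struct *k)
 *	{
 *		int ret;
 *
 *		trace_sched_kthread_stop(k);
 *		...			(stop the thread, collect its exit code)
 *		trace_sched_kthread_stop_ret(ret);
 *		return ret;
 *	}
 */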

/*
 * Tracepoint for waiting on task to unschedule:
 *
 * (NOTE: the 'rq' argument is not used by generic trace events,
 *        but used by the latency tracer plugin. )
 */
TRACE_EVENT(sched_wait_task,

	TP_PROTO(struct rq *rq, struct task_struct *p),

	TP_ARGS(rq, p),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid	= p->pid;
		__entry->prio	= p->prio;
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for waking up a task:
 *
 * (NOTE: the 'rq' argument is not used by generic trace events,
 *        but used by the latency tracer plugin. )
 */
DECLARE_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct rq *rq, struct task_struct *p, int success),

	TP_ARGS(rq, p, success),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	success			)
		__field(	int,	target_cpu		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
		__entry->success	= success;
		__entry->target_cpu	= task_cpu(p);
	),

	TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->success, __entry->target_cpu)
);

DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
	     TP_PROTO(struct rq *rq, struct task_struct *p, int success),
	     TP_ARGS(rq, p, success));

/*
 * Tracepoint for waking up a new task:
 *
 * (NOTE: the 'rq' argument is not used by generic trace events,
 *        but used by the latency tracer plugin. )
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
	     TP_PROTO(struct rq *rq, struct task_struct *p, int success),
	     TP_ARGS(rq, p, success));
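
/*
 * The two wakeup events above use the DECLARE_EVENT_CLASS()/DEFINE_EVENT()
 * pair: the class declares the record layout, assignment and print format
 * once, and each DEFINE_EVENT() stamps out a tracepoint that reuses it.
 * As an illustrative sketch (simplified; the real call site is
 * try_to_wake_up() in kernel/sched.c), an instance is fired like any
 * other tracepoint:
 *
 *	trace_sched_wakeup(rq, p, success);
 */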

/*
 * Tracepoint for task switches, performed by the scheduler:
 *
 * (NOTE: the 'rq' argument is not used by generic trace events,
 *        but used by the latency tracer plugin. )
 */
TRACE_EVENT(sched_switch,

	TP_PROTO(struct rq *rq, struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(rq, prev, next),

	TP_STRUCT__entry(
		__array(	char,	prev_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	prev_pid			)
		__field(	int,	prev_prio			)
		__field(	long,	prev_state			)
		__array(	char,	next_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	next_pid			)
		__field(	int,	next_prio			)
	),

	TP_fast_assign(
		memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
		__entry->prev_pid	= prev->pid;
		__entry->prev_prio	= prev->prio;
		__entry->prev_state	= prev->state;
		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
		__entry->next_pid	= next->pid;
		__entry->next_prio	= next->prio;
	),

	TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_pid=%d next_prio=%d",
		__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
		__entry->prev_state ?
		  __print_flags(__entry->prev_state, "|",
				{ 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
				{ 16, "Z" }, { 32, "X" }, { 64, "x" },
				{ 128, "W" }) : "R",
		__entry->next_comm, __entry->next_pid, __entry->next_prio)
);
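
/*
 * With the TP_printk() format above, a sched_switch record rendered by
 * ftrace looks roughly like this (field values are illustrative only;
 * the record is emitted as a single line):
 *
 *	prev_comm=swapper prev_pid=0 prev_prio=120 prev_state=R
 *		==> next_comm=bash next_pid=2143 next_prio=120
 *
 * prev_state is decoded by __print_flags(): "R" when the previous task
 * was still runnable (state == 0), otherwise one letter per set state
 * bit from the table above, joined with '|'.
 */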

/*
 * Tracepoint for a task being migrated:
 */
TRACE_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	orig_cpu		)
		__field(	int,	dest_cpu		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
		__entry->orig_cpu	= task_cpu(p);
		__entry->dest_cpu	= dest_cpu;
	),

	TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->orig_cpu, __entry->dest_cpu)
);

DECLARE_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for a waiting task:
 */
TRACE_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
		__entry->pid		= pid_nr(pid);
		__entry->prio		= current->prio;
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for do_fork:
 */
TRACE_EVENT(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_STRUCT__entry(
		__array(	char,	parent_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	parent_pid			)
		__array(	char,	child_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	child_pid			)
	),

	TP_fast_assign(
		memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
		__entry->parent_pid	= parent->pid;
		memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
		__entry->child_pid	= child->pid;
	),

	TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
		__entry->parent_comm, __entry->parent_pid,
		__entry->child_comm, __entry->child_pid)
);
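
/*
 * Illustrative sketch (simplified; the real call site is do_fork() in
 * kernel/fork.c): the event is fired once the child has been created,
 * with the current task as the parent:
 *
 *	struct task_struct *p = copy_process(clone_flags, ...);
 *
 *	trace_sched_process_fork(current, p);
 */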

/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
 *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
DECLARE_EVENT_CLASS(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(tsk, delay),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( u64,	delay			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid	= tsk->pid;
		__entry->delay	= delay;
	)
	TP_perf_assign(
		__perf_count(delay);
	),

	TP_printk("comm=%s pid=%d delay=%Lu [ns]",
			__entry->comm, __entry->pid,
			(unsigned long long)__entry->delay)
);
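
/*
 * The sched_stat_* events defined below all share this class.  As an
 * illustrative sketch (now_ns and start_ns are hypothetical locals; the
 * real call sites live in the fair scheduling class and depend on
 * CONFIG_SCHEDSTATS), each instance is fired with the task and a delay
 * in nanoseconds:
 *
 *	u64 delta = now_ns - start_ns;
 *
 *	trace_sched_stat_wait(tsk, delta);
 */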

/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_wait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
TRACE_EVENT(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, runtime, vruntime),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( u64,	runtime			)
		__field( u64,	vruntime			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->runtime	= runtime;
		__entry->vruntime	= vruntime;
	)
	TP_perf_assign(
		__perf_count(runtime);
	),

	TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
			__entry->comm, __entry->pid,
			(unsigned long long)__entry->runtime,
			(unsigned long long)__entry->vruntime)
);
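
/*
 * Illustrative sketch (simplified; the real call site is update_curr()
 * in kernel/sched_fair.c): the event is fired when a task's runtime is
 * accounted, with both values in nanoseconds:
 *
 *	trace_sched_stat_runtime(task_of(curr), delta_exec, curr->vruntime);
 */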

#endif /* _TRACE_SCHED_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
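
/*
 * A note on the double include protection: the guard at the top of this
 * file admits a second pass when TRACE_HEADER_MULTI_READ is defined,
 * which is what <trace/define_trace.h> relies on to expand the
 * TRACE_EVENT() macros a second time into the real tracepoint
 * definitions.  One .c file per subsystem requests that expansion
 * (sketch; the scheduler does this from kernel/sched.c):
 *
 *	#define CREATE_TRACE_POINTS
 *	#include <trace/events/sched.h>
 */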