#include "builtin.h"
#include "perf.h"

#include "util/util.h"
#include "util/evlist.h"
#include "util/cache.h"
#include "util/evsel.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"

#include <sys/prctl.h>
#include <sys/resource.h>

#include <semaphore.h>
#include <pthread.h>
#include <math.h>

static const char		*input_name;

static char			default_sort_order[] = "avg, max, switch, runtime";
static const char		*sort_order = default_sort_order;

static int			profile_cpu = -1;

#define PR_SET_NAME		15               /* Set process name */
#define MAX_CPUS		4096

static u64			run_measurement_overhead;
static u64			sleep_measurement_overhead;

#define COMM_LEN		20
#define SYM_LEN			129

#define MAX_PID			65536

static unsigned long		nr_tasks;

struct sched_atom;

struct task_desc {
	unsigned long		nr;
	unsigned long		pid;
	char			comm[COMM_LEN];

	unsigned long		nr_events;
	unsigned long		curr_event;
	struct sched_atom	**atoms;

	pthread_t		thread;
	sem_t			sleep_sem;

	sem_t			ready_for_work;
	sem_t			work_done_sem;

	u64			cpu_usage;
};

enum sched_event_type {
	SCHED_EVENT_RUN,
	SCHED_EVENT_SLEEP,
	SCHED_EVENT_WAKEUP,
	SCHED_EVENT_MIGRATION,
};

struct sched_atom {
	enum sched_event_type	type;
	int			specific_wait;
	u64			timestamp;
	u64			duration;
	unsigned long		nr;
	sem_t			*wait_sem;
	struct task_desc	*wakee;
};

static struct task_desc		*pid_to_task[MAX_PID];

static struct task_desc		**tasks;

static pthread_mutex_t		start_work_mutex = PTHREAD_MUTEX_INITIALIZER;
static u64			start_time;

static pthread_mutex_t		work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned long		nr_run_events;
static unsigned long		nr_sleep_events;
static unsigned long		nr_wakeup_events;

static unsigned long		nr_sleep_corrections;
static unsigned long		nr_run_events_optimized;

static unsigned long		targetless_wakeups;
static unsigned long		multitarget_wakeups;

static u64			cpu_usage;
static u64			runavg_cpu_usage;
static u64			parent_cpu_usage;
static u64			runavg_parent_cpu_usage;

static unsigned long		nr_runs;
static u64			sum_runtime;
static u64			sum_fluct;
static u64			run_avg;

static unsigned int		replay_repeat = 10;
static unsigned long		nr_timestamps;
static unsigned long		nr_unordered_timestamps;
static unsigned long		nr_state_machine_bugs;
static unsigned long		nr_context_switch_bugs;
static unsigned long		nr_events;
static unsigned long		nr_lost_chunks;
static unsigned long		nr_lost_events;

#define TASK_STATE_TO_CHAR_STR "RSDTtZX"

enum thread_state {
	THREAD_SLEEPING = 0,
	THREAD_WAIT_CPU,
	THREAD_SCHED_IN,
	THREAD_IGNORE
};

struct work_atom {
	struct list_head	list;
	enum thread_state	state;
	u64			sched_out_time;
	u64			wake_up_time;
	u64			sched_in_time;
	u64			runtime;
};

struct work_atoms {
	struct list_head	work_list;
	struct thread		*thread;
	struct rb_node		node;
	u64			max_lat;
	u64			max_lat_at;
	u64			total_lat;
	u64			nb_atoms;
	u64			total_runtime;
};

typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);

static struct rb_root		atom_root, sorted_atom_root;

static u64			all_runtime;
static u64			all_count;


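/*
 * All measurements below use CLOCK_MONOTONIC timestamps, expressed
 * in nanoseconds:
 */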
static u64 get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);

	return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

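/*
 * Spin until 'nsecs' have elapsed, compensating for the calibrated
 * overhead of the measurement itself:
 */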
static void burn_nsecs(u64 nsecs)
{
	u64 T0 = get_nsecs(), T1;

	do {
		T1 = get_nsecs();
	} while (T1 + run_measurement_overhead < T0 + nsecs);
}

static void sleep_nsecs(u64 nsecs)
{
	struct timespec ts;

	ts.tv_nsec = nsecs % 1000000000;
	ts.tv_sec = nsecs / 1000000000;

	nanosleep(&ts, NULL);
}

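/*
 * Calibration: take the minimum of ten zero-length runs as the fixed
 * cost of one measurement, so replayed runtimes are not inflated:
 */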
static void calibrate_run_measurement_overhead(void)
{
	u64 T0, T1, delta, min_delta = 1000000000ULL;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		burn_nsecs(0);
		T1 = get_nsecs();
		delta = T1-T0;
		min_delta = min(min_delta, delta);
	}
	run_measurement_overhead = min_delta;

	printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}

static void calibrate_sleep_measurement_overhead(void)
{
	u64 T0, T1, delta, min_delta = 1000000000ULL;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		sleep_nsecs(10000);
		T1 = get_nsecs();
		delta = T1-T0;
		min_delta = min(min_delta, delta);
	}
	min_delta -= 10000;
	sleep_measurement_overhead = min_delta;

	printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}

static struct sched_atom *
get_new_event(struct task_desc *task, u64 timestamp)
{
	struct sched_atom *event = zalloc(sizeof(*event));
	unsigned long idx = task->nr_events;
	size_t size;

	event->timestamp = timestamp;
	event->nr = idx;

	task->nr_events++;
	size = sizeof(struct sched_atom *) * task->nr_events;
	task->atoms = realloc(task->atoms, size);
	BUG_ON(!task->atoms);

	task->atoms[idx] = event;

	return event;
}

static struct sched_atom *last_event(struct task_desc *task)
{
	if (!task->nr_events)
		return NULL;

	return task->atoms[task->nr_events - 1];
}

static void
add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration)
{
	struct sched_atom *event, *curr_event = last_event(task);

	/*
	 * optimize an existing RUN event by merging this one
	 * to it:
	 */
	if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
		nr_run_events_optimized++;
		curr_event->duration += duration;
		return;
	}

	event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_RUN;
	event->duration = duration;

	nr_run_events++;
}

static void
add_sched_event_wakeup(struct task_desc *task, u64 timestamp,
		       struct task_desc *wakee)
{
	struct sched_atom *event, *wakee_event;

	event = get_new_event(task, timestamp);
	event->type = SCHED_EVENT_WAKEUP;
	event->wakee = wakee;

	wakee_event = last_event(wakee);
	if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
		targetless_wakeups++;
		return;
	}
	if (wakee_event->wait_sem) {
		multitarget_wakeups++;
		return;
	}

	wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
	sem_init(wakee_event->wait_sem, 0, 0);
	wakee_event->specific_wait = 1;
	event->wait_sem = wakee_event->wait_sem;

	nr_wakeup_events++;
}

static void
add_sched_event_sleep(struct task_desc *task, u64 timestamp,
		      u64 task_state __maybe_unused)
{
	struct sched_atom *event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_SLEEP;

	nr_sleep_events++;
}

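/*
 * Map a traced PID to its replay task descriptor, creating the
 * descriptor on first sight:
 */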
static struct task_desc *register_pid(unsigned long pid, const char *comm)
{
	struct task_desc *task;

	BUG_ON(pid >= MAX_PID);

	task = pid_to_task[pid];

	if (task)
		return task;

	task = zalloc(sizeof(*task));
	task->pid = pid;
	task->nr = nr_tasks;
	strcpy(task->comm, comm);
	/*
	 * every task starts in sleeping state - this gets ignored
	 * if there's no wakeup pointing to this sleep state:
	 */
	add_sched_event_sleep(task, 0, 0);

	pid_to_task[pid] = task;
	nr_tasks++;
	tasks = realloc(tasks, nr_tasks*sizeof(struct task_desc *));
	BUG_ON(!tasks);
	tasks[task->nr] = task;

	if (verbose)
		printf("registered task #%ld, PID %ld (%s)\n", nr_tasks, pid, comm);

	return task;
}


static void print_task_traces(void)
{
	struct task_desc *task;
	unsigned long i;

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
			task->nr, task->comm, task->pid, task->nr_events);
	}
}

static void add_cross_task_wakeups(void)
{
	struct task_desc *task1, *task2;
	unsigned long i, j;

	for (i = 0; i < nr_tasks; i++) {
		task1 = tasks[i];
		j = i + 1;
		if (j == nr_tasks)
			j = 0;
		task2 = tasks[j];
		add_sched_event_wakeup(task1, 0, task2);
	}
}

static void process_sched_event(struct task_desc *this_task __maybe_unused,
				struct sched_atom *atom)
{
	int ret = 0;

	switch (atom->type) {
		case SCHED_EVENT_RUN:
			burn_nsecs(atom->duration);
			break;
		case SCHED_EVENT_SLEEP:
			if (atom->wait_sem)
				ret = sem_wait(atom->wait_sem);
			BUG_ON(ret);
			break;
		case SCHED_EVENT_WAKEUP:
			if (atom->wait_sem)
				ret = sem_post(atom->wait_sem);
			BUG_ON(ret);
			break;
		case SCHED_EVENT_MIGRATION:
			break;
		default:
			BUG_ON(1);
	}
}

static u64 get_cpu_usage_nsec_parent(void)
{
	struct rusage ru;
	u64 sum;
	int err;

	err = getrusage(RUSAGE_SELF, &ru);
	BUG_ON(err);

	sum =  ru.ru_utime.tv_sec*1e9 + ru.ru_utime.tv_usec*1e3;
	sum += ru.ru_stime.tv_sec*1e9 + ru.ru_stime.tv_usec*1e3;

	return sum;
}

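/*
 * Each replay thread opens its own PERF_COUNT_SW_TASK_CLOCK software
 * counter, so per-task CPU usage can be read back directly:
 */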
static int self_open_counters(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));

	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;

	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);

	if (fd < 0)
		pr_debug("Error: sys_perf_event_open() syscall returned "
			 "with %d (%s)\n", fd, strerror(errno));
	return fd;
}

static u64 get_cpu_usage_nsec_self(int fd)
{
	u64 runtime;
	int ret;

	ret = read(fd, &runtime, sizeof(runtime));
	BUG_ON(ret != sizeof(runtime));

	return runtime;
}

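/*
 * Worker body: signal readiness, wait for the parent to release
 * start_work_mutex, replay this task's atoms while measuring its own
 * CPU time, then report completion via work_done_sem:
 */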
static void *thread_func(void *ctx)
{
	struct task_desc *this_task = ctx;
	u64 cpu_usage_0, cpu_usage_1;
	unsigned long i, ret;
	char comm2[22];
	int fd;

	sprintf(comm2, ":%s", this_task->comm);
	prctl(PR_SET_NAME, comm2);
	fd = self_open_counters();
	if (fd < 0)
		return NULL;
again:
	ret = sem_post(&this_task->ready_for_work);
	BUG_ON(ret);
	ret = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&start_work_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_self(fd);

	for (i = 0; i < this_task->nr_events; i++) {
		this_task->curr_event = i;
		process_sched_event(this_task, this_task->atoms[i]);
	}

	cpu_usage_1 = get_cpu_usage_nsec_self(fd);
	this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
	ret = sem_post(&this_task->work_done_sem);
	BUG_ON(ret);

	ret = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&work_done_wait_mutex);
	BUG_ON(ret);

	goto again;
}

static void create_tasks(void)
{
	struct task_desc *task;
	pthread_attr_t attr;
	unsigned long i;
	int err;

	err = pthread_attr_init(&attr);
	BUG_ON(err);
492 493
	err = pthread_attr_setstacksize(&attr,
			(size_t) max(16 * 1024, PTHREAD_STACK_MIN));
I
Ingo Molnar 已提交
494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511
	BUG_ON(err);
	err = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(err);
	err = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(err);
	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		sem_init(&task->ready_for_work, 0, 0);
		sem_init(&task->work_done_sem, 0, 0);
		task->curr_event = 0;
		err = pthread_create(&task->thread, &attr, thread_func, task);
		BUG_ON(err);
	}
}

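/*
 * One replay iteration: release all workers, wait for them to finish
 * and collect their CPU usage, keeping decaying run averages:
 */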
static void wait_for_tasks(void)
{
	u64 cpu_usage_0, cpu_usage_1;
	struct task_desc *task;
	unsigned long i, ret;

	start_time = get_nsecs();
	cpu_usage = 0;
	pthread_mutex_unlock(&work_done_wait_mutex);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		ret = sem_wait(&task->ready_for_work);
		BUG_ON(ret);
		sem_init(&task->ready_for_work, 0, 0);
	}
	ret = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_parent();

	pthread_mutex_unlock(&start_work_mutex);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		ret = sem_wait(&task->work_done_sem);
		BUG_ON(ret);
		sem_init(&task->work_done_sem, 0, 0);
		cpu_usage += task->cpu_usage;
		task->cpu_usage = 0;
	}

	cpu_usage_1 = get_cpu_usage_nsec_parent();
	if (!runavg_cpu_usage)
		runavg_cpu_usage = cpu_usage;
	runavg_cpu_usage = (runavg_cpu_usage*9 + cpu_usage)/10;

	parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
	if (!runavg_parent_cpu_usage)
		runavg_parent_cpu_usage = parent_cpu_usage;
	runavg_parent_cpu_usage = (runavg_parent_cpu_usage*9 +
				   parent_cpu_usage)/10;

	ret = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(ret);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		task->curr_event = 0;
	}
}

static void run_one_test(void)
{
	u64 T0, T1, delta, avg_delta, fluct;

	T0 = get_nsecs();
	wait_for_tasks();
	T1 = get_nsecs();

	delta = T1 - T0;
	sum_runtime += delta;
	nr_runs++;

	avg_delta = sum_runtime / nr_runs;
	if (delta < avg_delta)
		fluct = avg_delta - delta;
	else
		fluct = delta - avg_delta;
	sum_fluct += fluct;
	if (!run_avg)
		run_avg = delta;
	run_avg = (run_avg*9 + delta)/10;

	printf("#%-3ld: %0.3f, ",
		nr_runs, (double)delta/1000000.0);

	printf("ravg: %0.2f, ",
		(double)run_avg/1e6);

	printf("cpu: %0.2f / %0.2f",
		(double)cpu_usage/1e6, (double)runavg_cpu_usage/1e6);

#if 0
	/*
	 * rusage statistics done by the parent, these are less
	 * accurate than the sum_exec_runtime based statistics:
	 */
	printf(" [%0.2f / %0.2f]",
		(double)parent_cpu_usage/1e6,
		(double)runavg_parent_cpu_usage/1e6);
#endif

	printf("\n");

	if (nr_sleep_corrections)
		printf(" (%ld sleep corrections)\n", nr_sleep_corrections);
	nr_sleep_corrections = 0;
}

static void test_calibrations(void)
{
	u64 T0, T1;

	T0 = get_nsecs();
	burn_nsecs(1e6);
	T1 = get_nsecs();

	printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);

	T0 = get_nsecs();
	sleep_nsecs(1e6);
	T1 = get_nsecs();

	printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
}

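/*
 * Helpers to copy a single tracepoint field (or array) from the raw
 * sample payload into the matching member of a local event struct:
 */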
#define FILL_FIELD(ptr, field, event, data)	\
	ptr.field = (typeof(ptr.field)) raw_field_value(event, #field, data)

#define FILL_ARRAY(ptr, array, event, data)			\
do {								\
	void *__array = raw_field_ptr(event, #array, data);	\
	memcpy(ptr.array, __array, sizeof(ptr.array));	\
} while(0)

#define FILL_COMMON_FIELDS(ptr, event, data)			\
do {								\
	FILL_FIELD(ptr, common_type, event, data);		\
	FILL_FIELD(ptr, common_flags, event, data);		\
	FILL_FIELD(ptr, common_preempt_count, event, data);	\
	FILL_FIELD(ptr, common_pid, event, data);		\
	FILL_FIELD(ptr, common_tgid, event, data);		\
} while (0)



struct trace_switch_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char prev_comm[16];
	u32 prev_pid;
	u32 prev_prio;
	u64 prev_state;
	char next_comm[16];
	u32 next_pid;
	u32 next_prio;
};

struct trace_runtime_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char comm[16];
	u32 pid;
	u64 runtime;
	u64 vruntime;
};
struct trace_wakeup_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char comm[16];
	u32 pid;

	u32 prio;
	u32 success;
	u32 cpu;
};

struct trace_fork_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char parent_comm[16];
	u32 parent_pid;
	char child_comm[16];
	u32 child_pid;
};

struct trace_migrate_task_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char comm[16];
	u32 pid;

	u32 prio;
	u32 cpu;
};

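/*
 * Per-subcommand dispatch table: replay, latency and map each fill in
 * only the callbacks they need; NULL entries are skipped:
 */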
struct trace_sched_handler {
	int (*switch_event)(struct trace_switch_event *event,
			    struct machine *machine,
			    struct event_format *tp_format,
			    struct perf_sample *sample);

	int (*runtime_event)(struct trace_runtime_event *event,
			     struct machine *machine,
			     struct perf_sample *sample);

	int (*wakeup_event)(struct trace_wakeup_event *event,
			    struct machine *machine,
			    struct event_format *tp_format,
			    struct perf_sample *sample);
	int (*fork_event)(struct trace_fork_event *event,
			  struct event_format *tp_format);

	int (*migrate_task_event)(struct trace_migrate_task_event *event,
				  struct machine *machine,
				  struct perf_sample *sample);
};


static int
replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
		    struct machine *machine __maybe_unused,
		    struct event_format *event, struct perf_sample *sample)
{
	struct task_desc *waker, *wakee;

	if (verbose) {
		printf("sched_wakeup event %p\n", event);

		printf(" ... pid %d woke up %s/%d\n",
			wakeup_event->common_pid,
			wakeup_event->comm,
			wakeup_event->pid);
	}

	waker = register_pid(wakeup_event->common_pid, "<unknown>");
	wakee = register_pid(wakeup_event->pid, wakeup_event->comm);

	add_sched_event_wakeup(waker, sample->time, wakee);
	return 0;
}

static u64 cpu_last_switched[MAX_CPUS];

static int
replay_switch_event(struct trace_switch_event *switch_event,
		    struct machine *machine __maybe_unused,
		    struct event_format *event,
		    struct perf_sample *sample)
{
	struct task_desc *prev, __maybe_unused *next;
	u64 timestamp0, timestamp = sample->time;
	int cpu = sample->cpu;
	s64 delta;

	if (verbose)
		printf("sched_switch event %p\n", event);

	if (cpu >= MAX_CPUS || cpu < 0)
		return 0;

	timestamp0 = cpu_last_switched[cpu];
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_debug("hm, delta: %" PRId64 " < 0 ?\n", delta);
		return -1;
	}
	if (verbose) {
		printf(" ... switch from %s/%d to %s/%d [ran %" PRId64 " nsecs]\n",
			switch_event->prev_comm, switch_event->prev_pid,
			switch_event->next_comm, switch_event->next_pid,
			delta);
	}

	prev = register_pid(switch_event->prev_pid, switch_event->prev_comm);
	next = register_pid(switch_event->next_pid, switch_event->next_comm);

	cpu_last_switched[cpu] = timestamp;

	add_sched_event_run(prev, timestamp, delta);
	add_sched_event_sleep(prev, timestamp, switch_event->prev_state);

	return 0;
}


static int
replay_fork_event(struct trace_fork_event *fork_event,
		  struct event_format *event)
{
	if (verbose) {
		printf("sched_fork event %p\n", event);
		printf("... parent: %s/%d\n", fork_event->parent_comm, fork_event->parent_pid);
		printf("...  child: %s/%d\n", fork_event->child_comm, fork_event->child_pid);
	}
	register_pid(fork_event->parent_pid, fork_event->parent_comm);
	register_pid(fork_event->child_pid, fork_event->child_comm);
	return 0;
}

static struct trace_sched_handler replay_ops  = {
	.wakeup_event		= replay_wakeup_event,
	.switch_event		= replay_switch_event,
	.fork_event		= replay_fork_event,
};

struct sort_dimension {
	const char		*name;
	sort_fn_t		cmp;
	struct list_head	list;
};

static LIST_HEAD(cmp_pid);

static int
thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
{
	struct sort_dimension *sort;
	int ret = 0;

	BUG_ON(list_empty(list));

	list_for_each_entry(sort, list, list) {
		ret = sort->cmp(l, r);
		if (ret)
			return ret;
	}

	return ret;
}

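/*
 * Per-thread atom lists are kept in an rb-tree ordered by the active
 * sort keys; searches must use the same key list as inserts:
 */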
static struct work_atoms *
thread_atoms_search(struct rb_root *root, struct thread *thread,
			 struct list_head *sort_list)
{
	struct rb_node *node = root->rb_node;
	struct work_atoms key = { .thread = thread };

	while (node) {
		struct work_atoms *atoms;
		int cmp;

		atoms = container_of(node, struct work_atoms, node);

		cmp = thread_lat_cmp(sort_list, &key, atoms);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else {
			BUG_ON(thread != atoms->thread);
			return atoms;
		}
	}
	return NULL;
}

static void
__thread_latency_insert(struct rb_root *root, struct work_atoms *data,
			 struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct work_atoms *this;
		int cmp;

		this = container_of(*new, struct work_atoms, node);
		parent = *new;

		cmp = thread_lat_cmp(sort_list, data, this);

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}

static int thread_atoms_insert(struct thread *thread)
{
	struct work_atoms *atoms = zalloc(sizeof(*atoms));
	if (!atoms) {
		pr_err("No memory at %s\n", __func__);
		return -1;
	}

	atoms->thread = thread;
	INIT_LIST_HEAD(&atoms->work_list);
	__thread_latency_insert(&atom_root, atoms, &cmp_pid);
	return 0;
}

static int latency_fork_event(struct trace_fork_event *fork_event __maybe_unused,
			      struct event_format *event __maybe_unused)
{
	/* should insert the newcomer */
	return 0;
}

static char sched_out_state(struct trace_switch_event *switch_event)
{
	const char *str = TASK_STATE_TO_CHAR_STR;

	return str[switch_event->prev_state];
}

static int
add_sched_out_event(struct work_atoms *atoms,
		    char run_state,
		    u64 timestamp)
{
	struct work_atom *atom = zalloc(sizeof(*atom));
	if (!atom) {
		pr_err("No memory at %s\n", __func__);
		return -1;
	}

	atom->sched_out_time = timestamp;

	if (run_state == 'R') {
		atom->state = THREAD_WAIT_CPU;
		atom->wake_up_time = atom->sched_out_time;
	}

	list_add_tail(&atom->list, &atoms->work_list);
	return 0;
}

static void
add_runtime_event(struct work_atoms *atoms, u64 delta,
		  u64 timestamp __maybe_unused)
{
	struct work_atom *atom;

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	atom->runtime += delta;
	atoms->total_runtime += delta;
}

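/*
 * A sched-in closes the current latency atom: the delay is the time
 * from wake-up to sched-in, with the maximum (and when it happened)
 * tracked per thread:
 */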
static void
add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
{
	struct work_atom *atom;
	u64 delta;

	if (list_empty(&atoms->work_list))
		return;

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	if (atom->state != THREAD_WAIT_CPU)
		return;

	if (timestamp < atom->wake_up_time) {
		atom->state = THREAD_IGNORE;
		return;
	}

	atom->state = THREAD_SCHED_IN;
	atom->sched_in_time = timestamp;

	delta = atom->sched_in_time - atom->wake_up_time;
	atoms->total_lat += delta;
	if (delta > atoms->max_lat) {
		atoms->max_lat = delta;
		atoms->max_lat_at = timestamp;
	}
	atoms->nb_atoms++;
}

static int
latency_switch_event(struct trace_switch_event *switch_event,
		     struct machine *machine,
		     struct event_format *event __maybe_unused,
		     struct perf_sample *sample)
{
	struct work_atoms *out_events, *in_events;
	struct thread *sched_out, *sched_in;
	u64 timestamp0, timestamp = sample->time;
	int cpu = sample->cpu;
	s64 delta;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);

	timestamp0 = cpu_last_switched[cpu];
	cpu_last_switched[cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRId64 " < 0 ?\n", delta);
		return -1;
	}

	sched_out = machine__findnew_thread(machine, switch_event->prev_pid);
	sched_in = machine__findnew_thread(machine, switch_event->next_pid);

	out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
	if (!out_events) {
		if (thread_atoms_insert(sched_out))
			return -1;
		out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
		if (!out_events) {
			pr_err("out-event: Internal tree error");
			return -1;
		}
	}
	if (add_sched_out_event(out_events, sched_out_state(switch_event), timestamp))
		return -1;

	in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
	if (!in_events) {
		if (thread_atoms_insert(sched_in))
			return -1;
		in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
		if (!in_events) {
			pr_err("in-event: Internal tree error");
			return -1;
		}
		/*
		 * Task came in we have not heard about yet,
		 * add in an initial atom in runnable state:
		 */
		if (add_sched_out_event(in_events, 'R', timestamp))
			return -1;
	}
	add_sched_in_event(in_events, timestamp);

	return 0;
}

static int
latency_runtime_event(struct trace_runtime_event *runtime_event,
		      struct machine *machine, struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine, runtime_event->pid);
	struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
	u64 timestamp = sample->time;
	int cpu = sample->cpu;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);
	if (!atoms) {
		if (thread_atoms_insert(thread))
			return -1;
		atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
		if (!atoms) {
			pr_debug("in-event: Internal tree error");
			return -1;
		}
		if (add_sched_out_event(atoms, 'R', timestamp))
			return -1;
	}

	add_runtime_event(atoms, runtime_event->runtime, timestamp);
	return 0;
}

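/*
 * A wakeup marks the wakee's latest atom as waiting for CPU; a wakee
 * that was not sleeping usually means events from unmonitored CPUs
 * are missing:
 */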
static int
latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
		     struct machine *machine,
		     struct event_format *event __maybe_unused,
		     struct perf_sample *sample)
{
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *wakee;
	u64 timestamp = sample->time;

	/* Note for later, it may be interesting to observe the failing cases */
	if (!wakeup_event->success)
		return 0;

	wakee = machine__findnew_thread(machine, wakeup_event->pid);
	atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
	if (!atoms) {
		if (thread_atoms_insert(wakee))
			return -1;
		atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
		if (!atoms) {
			pr_debug("wakeup-event: Internal tree error");
			return -1;
		}
		if (add_sched_out_event(atoms, 'S', timestamp))
			return -1;
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	/*
	 * You WILL be missing events if you've recorded only
	 * one CPU, or are only looking at only one, so don't
	 * make useless noise.
	 */
	if (profile_cpu == -1 && atom->state != THREAD_SLEEPING)
		nr_state_machine_bugs++;

	nr_timestamps++;
	if (atom->sched_out_time > timestamp) {
		nr_unordered_timestamps++;
		return 0;
	}

	atom->state = THREAD_WAIT_CPU;
	atom->wake_up_time = timestamp;
	return 0;
}

static int
latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
			   struct machine *machine, struct perf_sample *sample)
{
	u64 timestamp = sample->time;
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *migrant;

	/*
	 * Only need to worry about migration when profiling one CPU.
	 */
	if (profile_cpu == -1)
		return 0;

	migrant = machine__findnew_thread(machine, migrate_task_event->pid);
	atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
	if (!atoms) {
		if (thread_atoms_insert(migrant))
			return -1;
		register_pid(migrant->pid, migrant->comm);
		atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
		if (!atoms) {
			pr_debug("migration-event: Internal tree error");
			return -1;
		}
		if (add_sched_out_event(atoms, 'R', timestamp))
			return -1;
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
	atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;

	nr_timestamps++;

	if (atom->sched_out_time > timestamp)
		nr_unordered_timestamps++;

	return 0;
}

static struct trace_sched_handler lat_ops  = {
	.wakeup_event		= latency_wakeup_event,
	.switch_event		= latency_switch_event,
	.runtime_event		= latency_runtime_event,
	.fork_event		= latency_fork_event,
	.migrate_task_event	= latency_migrate_task_event,
};

static void output_lat_thread(struct work_atoms *work_list)
{
	int i;
	int ret;
	u64 avg;

	if (!work_list->nb_atoms)
		return;
	/*
	 * Ignore idle threads:
	 */
	if (!strcmp(work_list->thread->comm, "swapper"))
		return;

	all_runtime += work_list->total_runtime;
	all_count += work_list->nb_atoms;

	ret = printf("  %s:%d ", work_list->thread->comm, work_list->thread->pid);

	for (i = 0; i < 24 - ret; i++)
		printf(" ");

	avg = work_list->total_lat / work_list->nb_atoms;

	printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %9.6f s\n",
	      (double)work_list->total_runtime / 1e6,
		 work_list->nb_atoms, (double)avg / 1e6,
		 (double)work_list->max_lat / 1e6,
		 (double)work_list->max_lat_at / 1e9);
}

static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->thread->pid < r->thread->pid)
		return -1;
	if (l->thread->pid > r->thread->pid)
		return 1;

	return 0;
}

static struct sort_dimension pid_sort_dimension = {
	.name			= "pid",
	.cmp			= pid_cmp,
};

static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
{
	u64 avgl, avgr;

	if (!l->nb_atoms)
		return -1;

	if (!r->nb_atoms)
		return 1;

	avgl = l->total_lat / l->nb_atoms;
	avgr = r->total_lat / r->nb_atoms;

	if (avgl < avgr)
		return -1;
	if (avgl > avgr)
		return 1;

	return 0;
}

static struct sort_dimension avg_sort_dimension = {
	.name			= "avg",
	.cmp			= avg_cmp,
};

static int max_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->max_lat < r->max_lat)
		return -1;
	if (l->max_lat > r->max_lat)
		return 1;

	return 0;
}

static struct sort_dimension max_sort_dimension = {
	.name			= "max",
	.cmp			= max_cmp,
};

static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->nb_atoms < r->nb_atoms)
		return -1;
	if (l->nb_atoms > r->nb_atoms)
		return 1;

	return 0;
}

static struct sort_dimension switch_sort_dimension = {
	.name			= "switch",
	.cmp			= switch_cmp,
};

static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->total_runtime < r->total_runtime)
		return -1;
	if (l->total_runtime > r->total_runtime)
		return 1;

	return 0;
}

static struct sort_dimension runtime_sort_dimension = {
	.name			= "runtime",
	.cmp			= runtime_cmp,
};

static struct sort_dimension *available_sorts[] = {
	&pid_sort_dimension,
	&avg_sort_dimension,
	&max_sort_dimension,
	&switch_sort_dimension,
	&runtime_sort_dimension,
};

#define NB_AVAILABLE_SORTS	(int)(sizeof(available_sorts) / sizeof(struct sort_dimension *))

static LIST_HEAD(sort_list);

static int sort_dimension__add(const char *tok, struct list_head *list)
{
	int i;

	for (i = 0; i < NB_AVAILABLE_SORTS; i++) {
		if (!strcmp(available_sorts[i]->name, tok)) {
			list_add_tail(&available_sorts[i]->list, list);

			return 0;
		}
	}

	return -1;
}

static void setup_sorting(void);

static void sort_lat(void)
{
	struct rb_node *node;

	for (;;) {
		struct work_atoms *data;
		node = rb_first(&atom_root);
		if (!node)
			break;

		rb_erase(node, &atom_root);
		data = rb_entry(node, struct work_atoms, node);
		__thread_latency_insert(&sorted_atom_root, data, &sort_list);
	}
}

static struct trace_sched_handler *trace_handler;

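/*
 * Raw-sample unpackers: decode one tracepoint payload into the
 * corresponding trace_*_event struct and hand it to the active
 * handler, if set:
 */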
static int process_sched_wakeup_event(struct perf_tool *tool __maybe_unused,
				      struct event_format *event,
				      struct perf_sample *sample,
				      struct machine *machine)
{
	void *data = sample->raw_data;
	struct trace_wakeup_event wakeup_event;
	int err = 0;
	FILL_COMMON_FIELDS(wakeup_event, event, data);
	FILL_ARRAY(wakeup_event, comm, event, data);
	FILL_FIELD(wakeup_event, pid, event, data);
	FILL_FIELD(wakeup_event, prio, event, data);
	FILL_FIELD(wakeup_event, success, event, data);
	FILL_FIELD(wakeup_event, cpu, event, data);
	if (trace_handler->wakeup_event)
		err = trace_handler->wakeup_event(&wakeup_event, machine, event, sample);

	return err;
}

/*
 * Track the current task - that way we can know whether there's any
 * weird events, such as a task being switched away that is not current.
 */
static int max_cpu;

static u32 curr_pid[MAX_CPUS] = { [0 ... MAX_CPUS-1] = -1 };

static struct thread *curr_thread[MAX_CPUS];

static char next_shortname1 = 'A';
static char next_shortname2 = '0';

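/*
 * 'perf sched map' output: each task gets a two-character shortname
 * on first appearance, and every switch prints one column per CPU:
 */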
static int
map_switch_event(struct trace_switch_event *switch_event,
		 struct machine *machine,
		 struct event_format *event __maybe_unused,
		 struct perf_sample *sample)
{
	struct thread *sched_out __maybe_unused, *sched_in;
	int new_shortname;
	u64 timestamp0, timestamp = sample->time;
	s64 delta;
	int cpu, this_cpu = sample->cpu;

	BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);

	if (this_cpu > max_cpu)
		max_cpu = this_cpu;

	timestamp0 = cpu_last_switched[this_cpu];
	cpu_last_switched[this_cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_debug("hm, delta: %" PRId64 " < 0 ?\n", delta);
		return -1;
	}
	sched_out = machine__findnew_thread(machine, switch_event->prev_pid);
	sched_in = machine__findnew_thread(machine, switch_event->next_pid);

	curr_thread[this_cpu] = sched_in;

	printf("  ");

	new_shortname = 0;
	if (!sched_in->shortname[0]) {
		sched_in->shortname[0] = next_shortname1;
		sched_in->shortname[1] = next_shortname2;

		if (next_shortname1 < 'Z') {
			next_shortname1++;
		} else {
			next_shortname1='A';
			if (next_shortname2 < '9') {
				next_shortname2++;
			} else {
				next_shortname2='0';
			}
		}
		new_shortname = 1;
	}

	for (cpu = 0; cpu <= max_cpu; cpu++) {
		if (cpu != this_cpu)
			printf(" ");
		else
			printf("*");

		if (curr_thread[cpu]) {
			if (curr_thread[cpu]->pid)
				printf("%2s ", curr_thread[cpu]->shortname);
			else
				printf(".  ");
		} else
			printf("   ");
	}

	printf("  %12.6f secs ", (double)timestamp/1e9);
	if (new_shortname) {
		printf("%s => %s:%d\n",
			sched_in->shortname, sched_in->comm, sched_in->pid);
	} else {
		printf("\n");
	}

	return 0;
}

static int process_sched_switch_event(struct perf_tool *tool __maybe_unused,
				      struct event_format *event,
				      struct perf_sample *sample,
				      struct machine *machine)
{
	int this_cpu = sample->cpu, err = 0;
	void *data = sample->raw_data;
	struct trace_switch_event switch_event;

	FILL_COMMON_FIELDS(switch_event, event, data);
	FILL_ARRAY(switch_event, prev_comm, event, data);
	FILL_FIELD(switch_event, prev_pid, event, data);
	FILL_FIELD(switch_event, prev_prio, event, data);
	FILL_FIELD(switch_event, prev_state, event, data);
	FILL_ARRAY(switch_event, next_comm, event, data);
	FILL_FIELD(switch_event, next_pid, event, data);
	FILL_FIELD(switch_event, next_prio, event, data);
	if (curr_pid[this_cpu] != (u32)-1) {
		/*
		 * Are we trying to switch away a PID that is
		 * not current?
		 */
		if (curr_pid[this_cpu] != switch_event.prev_pid)
			nr_context_switch_bugs++;
	}
	if (trace_handler->switch_event)
		err = trace_handler->switch_event(&switch_event, machine, event, sample);

	curr_pid[this_cpu] = switch_event.next_pid;
	return err;
}

static int process_sched_runtime_event(struct perf_tool *tool __maybe_unused,
				       struct event_format *event,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	void *data = sample->raw_data;
	struct trace_runtime_event runtime_event;
	int err = 0;

	FILL_ARRAY(runtime_event, comm, event, data);
	FILL_FIELD(runtime_event, pid, event, data);
	FILL_FIELD(runtime_event, runtime, event, data);
	FILL_FIELD(runtime_event, vruntime, event, data);
	if (trace_handler->runtime_event)
		err = trace_handler->runtime_event(&runtime_event, machine, sample);

	return err;
}

static int process_sched_fork_event(struct perf_tool *tool __maybe_unused,
				    struct event_format *event,
				    struct perf_sample *sample,
				    struct machine *machine __maybe_unused)
{
	void *data = sample->raw_data;
	struct trace_fork_event fork_event;
	int err = 0;

	FILL_COMMON_FIELDS(fork_event, event, data);
	FILL_ARRAY(fork_event, parent_comm, event, data);
	FILL_FIELD(fork_event, parent_pid, event, data);
	FILL_ARRAY(fork_event, child_comm, event, data);
	FILL_FIELD(fork_event, child_pid, event, data);
	if (trace_handler->fork_event)
		err = trace_handler->fork_event(&fork_event, event);

	return err;
}

static int process_sched_exit_event(struct perf_tool *tool __maybe_unused,
				    struct event_format *event,
				    struct perf_sample *sample __maybe_unused,
				    struct machine *machine __maybe_unused)
{
	if (verbose)
		printf("sched_exit event %p\n", event);

	return 0;
}

static int process_sched_migrate_task_event(struct perf_tool *tool __maybe_unused,
					    struct event_format *event,
					    struct perf_sample *sample,
					    struct machine *machine)
{
	void *data = sample->raw_data;
	struct trace_migrate_task_event migrate_task_event;
	int err = 0;

	FILL_COMMON_FIELDS(migrate_task_event, event, data);
	FILL_ARRAY(migrate_task_event, comm, event, data);
	FILL_FIELD(migrate_task_event, pid, event, data);
	FILL_FIELD(migrate_task_event, prio, event, data);
	FILL_FIELD(migrate_task_event, cpu, event, data);

	if (trace_handler->migrate_task_event)
		err = trace_handler->migrate_task_event(&migrate_task_event, machine, sample);

	return err;
}

typedef int (*tracepoint_handler)(struct perf_tool *tool,
				  struct event_format *tp_format,
				  struct perf_sample *sample,
				  struct machine *machine);

static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused,
						 union perf_event *event __maybe_unused,
						 struct perf_sample *sample,
						 struct perf_evsel *evsel,
						 struct machine *machine)
{
	struct thread *thread = machine__findnew_thread(machine, sample->pid);
	int err = 0;

	if (thread == NULL) {
		pr_debug("problem processing %s event, skipping it.\n",
			 perf_evsel__name(evsel));
		return -1;
	}

	evsel->hists.stats.total_period += sample->period;
	hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
	if (evsel->handler.func != NULL) {
		tracepoint_handler f = evsel->handler.func;
		err = f(tool, evsel->tp_format, sample, machine);
	}
	return err;
}

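/*
 * Top-level tool callbacks; ordered_samples makes the session layer
 * deliver samples in timestamp order across CPUs:
 */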
static struct perf_tool perf_sched = {
	.sample		 = perf_sched__process_tracepoint_sample,
	.comm		 = perf_event__process_comm,
	.lost		 = perf_event__process_lost,
	.fork		 = perf_event__process_task,
	.ordered_samples = true,
};

static int read_events(bool destroy, struct perf_session **psession)
{
	const struct perf_evsel_str_handler handlers[] = {
		{ "sched:sched_switch",	      process_sched_switch_event, },
		{ "sched:sched_stat_runtime", process_sched_runtime_event, },
		{ "sched:sched_wakeup",	      process_sched_wakeup_event, },
		{ "sched:sched_wakeup_new",   process_sched_wakeup_event, },
		{ "sched:sched_process_fork", process_sched_fork_event, },
		{ "sched:sched_process_exit", process_sched_exit_event, },
		{ "sched:sched_migrate_task", process_sched_migrate_task_event, },
	};
	struct perf_session *session;

	session = perf_session__new(input_name, O_RDONLY, 0, false, &perf_sched);
	if (session == NULL) {
		pr_debug("No Memory for session\n");
		return -1;
	}
	if (perf_session__set_tracepoints_handlers(session, handlers))
		goto out_delete;
	if (perf_session__has_traces(session, "record -R")) {
		int err = perf_session__process_events(session, &perf_sched);
		if (err) {
			pr_err("Failed to process events, error %d", err);
			goto out_delete;
		}
		nr_events      = session->hists.stats.nr_events[0];
		nr_lost_events = session->hists.stats.total_lost;
		nr_lost_chunks = session->hists.stats.nr_events[PERF_RECORD_LOST];
	}
	if (destroy)
		perf_session__delete(session);

	if (psession)
		*psession = session;

	return 0;

out_delete:
	perf_session__delete(session);
	return -1;
}

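/*
 * Report unordered timestamps, lost events and state machine
 * anomalies as percentages of all processed events:
 */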
static void print_bad_events(void)
{
	if (nr_unordered_timestamps && nr_timestamps) {
		printf("  INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
			(double)nr_unordered_timestamps/(double)nr_timestamps*100.0,
			nr_unordered_timestamps, nr_timestamps);
	}
	if (nr_lost_events && nr_events) {
		printf("  INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
			(double)nr_lost_events/(double)nr_events*100.0,
			nr_lost_events, nr_events, nr_lost_chunks);
	}
	if (nr_state_machine_bugs && nr_timestamps) {
		printf("  INFO: %.3f%% state machine bugs (%ld out of %ld)",
			(double)nr_state_machine_bugs/(double)nr_timestamps*100.0,
			nr_state_machine_bugs, nr_timestamps);
		if (nr_lost_events)
			printf(" (due to lost events?)");
		printf("\n");
	}
	if (nr_context_switch_bugs && nr_timestamps) {
		printf("  INFO: %.3f%% context switch bugs (%ld out of %ld)",
			(double)nr_context_switch_bugs/(double)nr_timestamps*100.0,
			nr_context_switch_bugs, nr_timestamps);
		if (nr_lost_events)
			printf(" (due to lost events?)");
		printf("\n");
	}
}

static int __cmd_lat(void)
{
	struct rb_node *next;
	struct perf_session *session;

	setup_pager();
	if (read_events(false, &session))
		return -1;
	sort_lat();

	printf("\n ---------------------------------------------------------------------------------------------------------------\n");
	printf("  Task                  |   Runtime ms  | Switches | Average delay ms | Maximum delay ms | Maximum delay at     |\n");
	printf(" ---------------------------------------------------------------------------------------------------------------\n");

	next = rb_first(&sorted_atom_root);

	while (next) {
		struct work_atoms *work_list;

		work_list = rb_entry(next, struct work_atoms, node);
		output_lat_thread(work_list);
		next = rb_next(next);
	}

	printf(" -----------------------------------------------------------------------------------------\n");
	printf("  TOTAL:                |%11.3f ms |%9" PRIu64 " |\n",
		(double)all_runtime/1e6, all_count);

	printf(" ---------------------------------------------------\n");

	print_bad_events();
	printf("\n");

	perf_session__delete(session);
	return 0;
}

static struct trace_sched_handler map_ops  = {
	.wakeup_event		= NULL,
	.switch_event		= map_switch_event,
	.runtime_event		= NULL,
	.fork_event		= NULL,
};

static int __cmd_map(void)
{
	max_cpu = sysconf(_SC_NPROCESSORS_CONF);

	setup_pager();
	if (read_events(true, NULL))
		return -1;
	print_bad_events();
	return 0;
}

static int __cmd_replay(void)
{
	unsigned long i;

	calibrate_run_measurement_overhead();
	calibrate_sleep_measurement_overhead();

	test_calibrations();

	if (read_events(true, NULL))
		return -1;

	printf("nr_run_events:        %ld\n", nr_run_events);
	printf("nr_sleep_events:      %ld\n", nr_sleep_events);
	printf("nr_wakeup_events:     %ld\n", nr_wakeup_events);

	if (targetless_wakeups)
		printf("target-less wakeups:  %ld\n", targetless_wakeups);
	if (multitarget_wakeups)
		printf("multi-target wakeups: %ld\n", multitarget_wakeups);
	if (nr_run_events_optimized)
		printf("run atoms optimized: %ld\n",
			nr_run_events_optimized);

	print_task_traces();
	add_cross_task_wakeups();

	create_tasks();
	printf("------------------------------------------------------------\n");
	for (i = 0; i < replay_repeat; i++)
		run_one_test();

	return 0;
}


static const char * const sched_usage[] = {
	"perf sched [<options>] {record|latency|map|replay|script}",
	NULL
};

static const struct option sched_options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		    "input file name"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};

static const char * const latency_usage[] = {
	"perf sched latency [<options>]",
	NULL
};

static const struct option latency_options[] = {
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
		   "sort by key(s): runtime, switch, avg, max"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_INTEGER('C', "CPU", &profile_cpu,
		    "CPU to profile on"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};

static const char * const replay_usage[] = {
	"perf sched replay [<options>]",
	NULL
};

static const struct option replay_options[] = {
	OPT_UINTEGER('r', "repeat", &replay_repeat,
		     "repeat the workload replay N times (-1: infinite)"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};

static void setup_sorting(void)
{
	char *tmp, *tok, *str = strdup(sort_order);

	for (tok = strtok_r(str, ", ", &tmp);
			tok; tok = strtok_r(NULL, ", ", &tmp)) {
		if (sort_dimension__add(tok, &sort_list) < 0) {
			error("Unknown --sort key: `%s'", tok);
			usage_with_options(latency_usage, latency_options);
		}
	}

	free(str);

	sort_dimension__add("pid", &cmp_pid);
}

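/*
 * 'perf sched record' is a thin wrapper around 'perf record' with the
 * scheduler tracepoints pre-selected; extra arguments are appended:
 */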
static const char *record_args[] = {
	"record",
	"-a",
	"-R",
	"-f",
	"-m", "1024",
	"-c", "1",
	"-e", "sched:sched_switch",
	"-e", "sched:sched_stat_wait",
	"-e", "sched:sched_stat_sleep",
	"-e", "sched:sched_stat_iowait",
	"-e", "sched:sched_stat_runtime",
	"-e", "sched:sched_process_exit",
	"-e", "sched:sched_process_fork",
	"-e", "sched:sched_wakeup",
	"-e", "sched:sched_migrate_task",
};

static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_record(i, rec_argv, NULL);
}

int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
{
	argc = parse_options(argc, argv, sched_options, sched_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(sched_usage, sched_options);

	/*
	 * Aliased to 'perf script' for now:
	 */
	if (!strcmp(argv[0], "script"))
		return cmd_script(argc, argv, prefix);
	symbol__init();
	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strncmp(argv[0], "lat", 3)) {
		trace_handler = &lat_ops;
		if (argc > 1) {
			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
			if (argc)
				usage_with_options(latency_usage, latency_options);
		}
		setup_sorting();
		return __cmd_lat();
	} else if (!strcmp(argv[0], "map")) {
		trace_handler = &map_ops;
		setup_sorting();
		return __cmd_map();
	} else if (!strncmp(argv[0], "rep", 3)) {
		trace_handler = &replay_ops;
		if (argc) {
			argc = parse_options(argc, argv, replay_options, replay_usage, 0);
			if (argc)
				usage_with_options(replay_usage, replay_options);
		}
		return __cmd_replay();
	} else {
		usage_with_options(sched_usage, sched_options);
	}

	return 0;
}