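/*
 * builtin-sched.c: 'perf sched' - scheduler measurement and replay tool.
 *
 * Builds per-task event chains from sched tracepoints and implements,
 * among others, 'perf sched latency' (wakeup-to-run delays), 'perf
 * sched map' (per-CPU context-switch view) and 'perf sched replay'
 * (re-execute the recorded scheduling pattern with real threads).
 *
 * Typical usage (a sketch; options vary by perf version):
 *
 *   perf sched record -- sleep 1
 *   perf sched latency --sort max
 */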
#include "builtin.h"
#include "perf.h"

#include "util/util.h"
#include "util/evlist.h"
#include "util/cache.h"
#include "util/evsel.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/cloexec.h"
#include "util/thread_map.h"
#include "util/color.h"
#include "util/stat.h"

#include <subcmd/parse-options.h>
#include "util/trace-event.h"

#include "util/debug.h"

#include <linux/log2.h>
#include <sys/prctl.h>
#include <sys/resource.h>

#include <semaphore.h>
#include <pthread.h>
#include <math.h>
#include <api/fs/fs.h>
#include <linux/time64.h>

#define PR_SET_NAME		15               /* Set process name */
#define MAX_CPUS		4096
#define COMM_LEN		20
#define SYM_LEN			129
#define MAX_PID			1024000

struct sched_atom;

struct task_desc {
	unsigned long		nr;
	unsigned long		pid;
	char			comm[COMM_LEN];

	unsigned long		nr_events;
	unsigned long		curr_event;
	struct sched_atom	**atoms;

	pthread_t		thread;
	sem_t			sleep_sem;

	sem_t			ready_for_work;
	sem_t			work_done_sem;

	u64			cpu_usage;
};

enum sched_event_type {
	SCHED_EVENT_RUN,
	SCHED_EVENT_SLEEP,
	SCHED_EVENT_WAKEUP,
	SCHED_EVENT_MIGRATION,
};

struct sched_atom {
	enum sched_event_type	type;
	int			specific_wait;
	u64			timestamp;
	u64			duration;
	unsigned long		nr;
	sem_t			*wait_sem;
	struct task_desc	*wakee;
};

#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"

enum thread_state {
	THREAD_SLEEPING = 0,
	THREAD_WAIT_CPU,
	THREAD_SCHED_IN,
	THREAD_IGNORE
};

struct work_atom {
	struct list_head	list;
	enum thread_state	state;
	u64			sched_out_time;
	u64			wake_up_time;
	u64			sched_in_time;
	u64			runtime;
};

struct work_atoms {
	struct list_head	work_list;
	struct thread		*thread;
	struct rb_node		node;
	u64			max_lat;
	u64			max_lat_at;
	u64			total_lat;
	u64			nb_atoms;
	u64			total_runtime;
	int			num_merged;
};

typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);

struct perf_sched;

struct trace_sched_handler {
	int (*switch_event)(struct perf_sched *sched, struct perf_evsel *evsel,
			    struct perf_sample *sample, struct machine *machine);

	int (*runtime_event)(struct perf_sched *sched, struct perf_evsel *evsel,
			     struct perf_sample *sample, struct machine *machine);

	int (*wakeup_event)(struct perf_sched *sched, struct perf_evsel *evsel,
			    struct perf_sample *sample, struct machine *machine);

	/* PERF_RECORD_FORK event, not sched_process_fork tracepoint */
	int (*fork_event)(struct perf_sched *sched, union perf_event *event,
			  struct machine *machine);

	int (*migrate_task_event)(struct perf_sched *sched,
				  struct perf_evsel *evsel,
				  struct perf_sample *sample,
				  struct machine *machine);
};

#define COLOR_PIDS PERF_COLOR_BLUE
#define COLOR_CPUS PERF_COLOR_BG_RED

struct perf_sched_map {
	DECLARE_BITMAP(comp_cpus_mask, MAX_CPUS);
	int			*comp_cpus;
	bool			 comp;
	struct thread_map	*color_pids;
	const char		*color_pids_str;
	struct cpu_map		*color_cpus;
	const char		*color_cpus_str;
	struct cpu_map		*cpus;
	const char		*cpus_str;
};

struct perf_sched {
	struct perf_tool tool;
	const char	 *sort_order;
	unsigned long	 nr_tasks;
	struct task_desc **pid_to_task;
	struct task_desc **tasks;
	const struct trace_sched_handler *tp_handler;
	pthread_mutex_t	 start_work_mutex;
	pthread_mutex_t	 work_done_wait_mutex;
	int		 profile_cpu;
/*
 * Track the current task - that way we can know whether there's any
 * weird events, such as a task being switched away that is not current.
 */
	int		 max_cpu;
	u32		 curr_pid[MAX_CPUS];
	struct thread	 *curr_thread[MAX_CPUS];
	char		 next_shortname1;
	char		 next_shortname2;
	unsigned int	 replay_repeat;
	unsigned long	 nr_run_events;
	unsigned long	 nr_sleep_events;
	unsigned long	 nr_wakeup_events;
	unsigned long	 nr_sleep_corrections;
	unsigned long	 nr_run_events_optimized;
	unsigned long	 targetless_wakeups;
	unsigned long	 multitarget_wakeups;
	unsigned long	 nr_runs;
	unsigned long	 nr_timestamps;
	unsigned long	 nr_unordered_timestamps;
	unsigned long	 nr_context_switch_bugs;
	unsigned long	 nr_events;
	unsigned long	 nr_lost_chunks;
	unsigned long	 nr_lost_events;
	u64		 run_measurement_overhead;
	u64		 sleep_measurement_overhead;
	u64		 start_time;
	u64		 cpu_usage;
	u64		 runavg_cpu_usage;
	u64		 parent_cpu_usage;
	u64		 runavg_parent_cpu_usage;
	u64		 sum_runtime;
	u64		 sum_fluct;
	u64		 run_avg;
	u64		 all_runtime;
	u64		 all_count;
	u64		 cpu_last_switched[MAX_CPUS];
	struct rb_root	 atom_root, sorted_atom_root, merged_atom_root;
	struct list_head sort_list, cmp_pid;
	bool force;
	bool skip_merge;
	struct perf_sched_map map;
};

/* per thread run time data */
struct thread_runtime {
	u64 last_time;      /* time of previous sched in/out event */
	u64 dt_run;         /* run time */
	u64 dt_wait;        /* time between CPU access (off cpu) */
	u64 dt_delay;       /* time between wakeup and sched-in */
	u64 ready_to_run;   /* time of wakeup */

	struct stats run_stats;
	u64 total_run_time;
};

/* per event run time data */
struct evsel_runtime {
	u64 *last_time; /* time this event was last seen per cpu */
	u32 ncpu;       /* highest cpu slot allocated */
};

/* track idle times per cpu */
static struct thread **idle_threads;
static int idle_max_cpu;
static char idle_comm[] = "<idle>";

static u64 get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);

	return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}

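/*
 * Spin on the monotonic clock until 'nsecs' have elapsed.  The
 * calibrated run_measurement_overhead is deducted so that replayed run
 * atoms do not also pay for the cost of measuring time itself.
 */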
static void burn_nsecs(struct perf_sched *sched, u64 nsecs)
{
	u64 T0 = get_nsecs(), T1;

	do {
		T1 = get_nsecs();
	} while (T1 + sched->run_measurement_overhead < T0 + nsecs);
}

static void sleep_nsecs(u64 nsecs)
{
	struct timespec ts;

	ts.tv_nsec = nsecs % 999999999;
	ts.tv_sec = nsecs / 999999999;

	nanosleep(&ts, NULL);
}

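/*
 * The two calibration routines below sample ten runs and keep the
 * minimum delta: the minimum, rather than the mean, best approximates
 * the fixed overhead that every burn/sleep call incurs.
 */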
static void calibrate_run_measurement_overhead(struct perf_sched *sched)
{
	u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		burn_nsecs(sched, 0);
		T1 = get_nsecs();
		delta = T1-T0;
		min_delta = min(min_delta, delta);
	}
	sched->run_measurement_overhead = min_delta;

	printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}

static void calibrate_sleep_measurement_overhead(struct perf_sched *sched)
{
	u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		sleep_nsecs(10000);
		T1 = get_nsecs();
		delta = T1-T0;
		min_delta = min(min_delta, delta);
	}
	min_delta -= 10000;
	sched->sleep_measurement_overhead = min_delta;

	printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}

static struct sched_atom *
get_new_event(struct task_desc *task, u64 timestamp)
{
	struct sched_atom *event = zalloc(sizeof(*event));
	unsigned long idx = task->nr_events;
	size_t size;

	event->timestamp = timestamp;
	event->nr = idx;

	task->nr_events++;
	size = sizeof(struct sched_atom *) * task->nr_events;
	task->atoms = realloc(task->atoms, size);
	BUG_ON(!task->atoms);

	task->atoms[idx] = event;

	return event;
}

static struct sched_atom *last_event(struct task_desc *task)
{
	if (!task->nr_events)
		return NULL;

	return task->atoms[task->nr_events - 1];
}

static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task,
				u64 timestamp, u64 duration)
{
	struct sched_atom *event, *curr_event = last_event(task);

	/*
	 * optimize an existing RUN event by merging this one
	 * to it:
	 */
	if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
		sched->nr_run_events_optimized++;
		curr_event->duration += duration;
		return;
	}

	event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_RUN;
	event->duration = duration;

	sched->nr_run_events++;
}

static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task,
				   u64 timestamp, struct task_desc *wakee)
{
	struct sched_atom *event, *wakee_event;

	event = get_new_event(task, timestamp);
	event->type = SCHED_EVENT_WAKEUP;
	event->wakee = wakee;

	wakee_event = last_event(wakee);
	if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
		sched->targetless_wakeups++;
		return;
	}
	if (wakee_event->wait_sem) {
		sched->multitarget_wakeups++;
		return;
	}

	wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
	sem_init(wakee_event->wait_sem, 0, 0);
	wakee_event->specific_wait = 1;
	event->wait_sem = wakee_event->wait_sem;

	sched->nr_wakeup_events++;
}

static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
				  u64 timestamp, u64 task_state __maybe_unused)
{
	struct sched_atom *event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_SLEEP;

	sched->nr_sleep_events++;
}

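/*
 * Look up (or create) the replay task for a raw PID.  pid_to_task is
 * sized from kernel/pid_max (falling back to MAX_PID) and grown on
 * demand, so the PID can be used as a direct array index.
 */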
static struct task_desc *register_pid(struct perf_sched *sched,
				      unsigned long pid, const char *comm)
{
	struct task_desc *task;
	static int pid_max;

	if (sched->pid_to_task == NULL) {
		if (sysctl__read_int("kernel/pid_max", &pid_max) < 0)
			pid_max = MAX_PID;
		BUG_ON((sched->pid_to_task = calloc(pid_max, sizeof(struct task_desc *))) == NULL);
	}
	if (pid >= (unsigned long)pid_max) {
		BUG_ON((sched->pid_to_task = realloc(sched->pid_to_task, (pid + 1) *
			sizeof(struct task_desc *))) == NULL);
		while (pid >= (unsigned long)pid_max)
			sched->pid_to_task[pid_max++] = NULL;
	}

	task = sched->pid_to_task[pid];

	if (task)
		return task;

	task = zalloc(sizeof(*task));
	task->pid = pid;
	task->nr = sched->nr_tasks;
	strcpy(task->comm, comm);
	/*
	 * every task starts in sleeping state - this gets ignored
	 * if there's no wakeup pointing to this sleep state:
	 */
	add_sched_event_sleep(sched, task, 0, 0);

	sched->pid_to_task[pid] = task;
	sched->nr_tasks++;
	sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *));
	BUG_ON(!sched->tasks);
	sched->tasks[task->nr] = task;

	if (verbose)
		printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm);

	return task;
}


static void print_task_traces(struct perf_sched *sched)
{
	struct task_desc *task;
	unsigned long i;

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
			task->nr, task->comm, task->pid, task->nr_events);
	}
}

static void add_cross_task_wakeups(struct perf_sched *sched)
{
	struct task_desc *task1, *task2;
	unsigned long i, j;

	for (i = 0; i < sched->nr_tasks; i++) {
		task1 = sched->tasks[i];
		j = i + 1;
		if (j == sched->nr_tasks)
			j = 0;
		task2 = sched->tasks[j];
		add_sched_event_wakeup(sched, task1, 0, task2);
	}
}

static void perf_sched__process_event(struct perf_sched *sched,
				      struct sched_atom *atom)
{
	int ret = 0;

	switch (atom->type) {
		case SCHED_EVENT_RUN:
			burn_nsecs(sched, atom->duration);
			break;
		case SCHED_EVENT_SLEEP:
			if (atom->wait_sem)
				ret = sem_wait(atom->wait_sem);
			BUG_ON(ret);
			break;
		case SCHED_EVENT_WAKEUP:
			if (atom->wait_sem)
				ret = sem_post(atom->wait_sem);
			BUG_ON(ret);
			break;
		case SCHED_EVENT_MIGRATION:
			break;
		default:
			BUG_ON(1);
	}
}

static u64 get_cpu_usage_nsec_parent(void)
{
	struct rusage ru;
	u64 sum;
	int err;

	err = getrusage(RUSAGE_SELF, &ru);
	BUG_ON(err);

	sum =  ru.ru_utime.tv_sec * NSEC_PER_SEC + ru.ru_utime.tv_usec * NSEC_PER_USEC;
	sum += ru.ru_stime.tv_sec * NSEC_PER_SEC + ru.ru_stime.tv_usec * NSEC_PER_USEC;

	return sum;
}

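/*
 * Open a per-thread PERF_COUNT_SW_TASK_CLOCK counter so each replay
 * thread can read back exactly how much CPU time it consumed.  On
 * EMFILE, the -f/--force path below retries after raising RLIMIT_NOFILE
 * by the number of tasks still waiting for a counter.
 */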
static int self_open_counters(struct perf_sched *sched, unsigned long cur_task)
{
	struct perf_event_attr attr;
	char sbuf[STRERR_BUFSIZE], info[STRERR_BUFSIZE];
	int fd;
	struct rlimit limit;
	bool need_privilege = false;

	memset(&attr, 0, sizeof(attr));

	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;

force_again:
	fd = sys_perf_event_open(&attr, 0, -1, -1,
				 perf_event_open_cloexec_flag());

	if (fd < 0) {
		if (errno == EMFILE) {
			if (sched->force) {
				BUG_ON(getrlimit(RLIMIT_NOFILE, &limit) == -1);
				limit.rlim_cur += sched->nr_tasks - cur_task;
				if (limit.rlim_cur > limit.rlim_max) {
					limit.rlim_max = limit.rlim_cur;
					need_privilege = true;
				}
				if (setrlimit(RLIMIT_NOFILE, &limit) == -1) {
					if (need_privilege && errno == EPERM)
						strcpy(info, "Need privilege\n");
				} else
					goto force_again;
			} else
				strcpy(info, "Have a try with -f option\n");
		}
		pr_err("Error: sys_perf_event_open() syscall returned "
		       "with %d (%s)\n%s", fd,
		       str_error_r(errno, sbuf, sizeof(sbuf)), info);
		exit(EXIT_FAILURE);
	}
	return fd;
}

static u64 get_cpu_usage_nsec_self(int fd)
{
	u64 runtime;
	int ret;

	ret = read(fd, &runtime, sizeof(runtime));
	BUG_ON(ret != sizeof(runtime));

	return runtime;
}

struct sched_thread_parms {
	struct task_desc  *task;
	struct perf_sched *sched;
	int fd;
};

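/*
 * Replay worker.  Each task thread posts ready_for_work, blocks on
 * start_work_mutex until the parent releases it, replays its atoms
 * while measuring its own task-clock usage, posts work_done_sem and
 * loops for the next repetition.
 */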
static void *thread_func(void *ctx)
{
	struct sched_thread_parms *parms = ctx;
	struct task_desc *this_task = parms->task;
	struct perf_sched *sched = parms->sched;
	u64 cpu_usage_0, cpu_usage_1;
	unsigned long i, ret;
	char comm2[22];
	int fd = parms->fd;

	zfree(&parms);

	sprintf(comm2, ":%s", this_task->comm);
	prctl(PR_SET_NAME, comm2);
	if (fd < 0)
		return NULL;
again:
	ret = sem_post(&this_task->ready_for_work);
	BUG_ON(ret);
	ret = pthread_mutex_lock(&sched->start_work_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&sched->start_work_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_self(fd);

	for (i = 0; i < this_task->nr_events; i++) {
		this_task->curr_event = i;
		perf_sched__process_event(sched, this_task->atoms[i]);
	}

	cpu_usage_1 = get_cpu_usage_nsec_self(fd);
	this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
	ret = sem_post(&this_task->work_done_sem);
	BUG_ON(ret);

	ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&sched->work_done_wait_mutex);
	BUG_ON(ret);

	goto again;
}

static void create_tasks(struct perf_sched *sched)
{
	struct task_desc *task;
	pthread_attr_t attr;
	unsigned long i;
	int err;

	err = pthread_attr_init(&attr);
	BUG_ON(err);
	err = pthread_attr_setstacksize(&attr,
			(size_t) max(16 * 1024, PTHREAD_STACK_MIN));
	BUG_ON(err);
	err = pthread_mutex_lock(&sched->start_work_mutex);
	BUG_ON(err);
	err = pthread_mutex_lock(&sched->work_done_wait_mutex);
	BUG_ON(err);
	for (i = 0; i < sched->nr_tasks; i++) {
		struct sched_thread_parms *parms = malloc(sizeof(*parms));
		BUG_ON(parms == NULL);
		parms->task = task = sched->tasks[i];
		parms->sched = sched;
		parms->fd = self_open_counters(sched, i);
		sem_init(&task->sleep_sem, 0, 0);
		sem_init(&task->ready_for_work, 0, 0);
		sem_init(&task->work_done_sem, 0, 0);
		task->curr_event = 0;
		err = pthread_create(&task->thread, &attr, thread_func, parms);
		BUG_ON(err);
	}
}

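/*
 * One replay iteration, driven by the parent: wait for every worker to
 * become ready, time the run, then fold per-task and parent CPU usage
 * into running averages weighted by replay_repeat.
 */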
static void wait_for_tasks(struct perf_sched *sched)
{
	u64 cpu_usage_0, cpu_usage_1;
	struct task_desc *task;
	unsigned long i, ret;

	sched->start_time = get_nsecs();
	sched->cpu_usage = 0;
	pthread_mutex_unlock(&sched->work_done_wait_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		ret = sem_wait(&task->ready_for_work);
		BUG_ON(ret);
		sem_init(&task->ready_for_work, 0, 0);
	}
	ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_parent();

	pthread_mutex_unlock(&sched->start_work_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		ret = sem_wait(&task->work_done_sem);
		BUG_ON(ret);
		sem_init(&task->work_done_sem, 0, 0);
		sched->cpu_usage += task->cpu_usage;
		task->cpu_usage = 0;
	}

	cpu_usage_1 = get_cpu_usage_nsec_parent();
	if (!sched->runavg_cpu_usage)
		sched->runavg_cpu_usage = sched->cpu_usage;
	sched->runavg_cpu_usage = (sched->runavg_cpu_usage * (sched->replay_repeat - 1) + sched->cpu_usage) / sched->replay_repeat;

	sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
	if (!sched->runavg_parent_cpu_usage)
		sched->runavg_parent_cpu_usage = sched->parent_cpu_usage;
	sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * (sched->replay_repeat - 1) +
					 sched->parent_cpu_usage)/sched->replay_repeat;

	ret = pthread_mutex_lock(&sched->start_work_mutex);
	BUG_ON(ret);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		task->curr_event = 0;
	}
}

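/*
 * run_avg below is a running average over replay_repeat iterations:
 *
 *   run_avg = (run_avg * (replay_repeat - 1) + delta) / replay_repeat
 */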
static void run_one_test(struct perf_sched *sched)
{
	u64 T0, T1, delta, avg_delta, fluct;

	T0 = get_nsecs();
	wait_for_tasks(sched);
	T1 = get_nsecs();

	delta = T1 - T0;
	sched->sum_runtime += delta;
	sched->nr_runs++;

	avg_delta = sched->sum_runtime / sched->nr_runs;
	if (delta < avg_delta)
		fluct = avg_delta - delta;
	else
		fluct = delta - avg_delta;
	sched->sum_fluct += fluct;
	if (!sched->run_avg)
		sched->run_avg = delta;
	sched->run_avg = (sched->run_avg * (sched->replay_repeat - 1) + delta) / sched->replay_repeat;

	printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / NSEC_PER_MSEC);

	printf("ravg: %0.2f, ", (double)sched->run_avg / NSEC_PER_MSEC);

	printf("cpu: %0.2f / %0.2f",
		(double)sched->cpu_usage / NSEC_PER_MSEC, (double)sched->runavg_cpu_usage / NSEC_PER_MSEC);

#if 0
	/*
	 * rusage statistics done by the parent, these are less
	 * accurate than the sched->sum_exec_runtime based statistics:
	 */
	printf(" [%0.2f / %0.2f]",
		(double)sched->parent_cpu_usage / NSEC_PER_MSEC,
		(double)sched->runavg_parent_cpu_usage / NSEC_PER_MSEC);
#endif

	printf("\n");

	if (sched->nr_sleep_corrections)
		printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections);
	sched->nr_sleep_corrections = 0;
}

static void test_calibrations(struct perf_sched *sched)
{
	u64 T0, T1;

	T0 = get_nsecs();
	burn_nsecs(sched, NSEC_PER_MSEC);
	T1 = get_nsecs();

	printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);

	T0 = get_nsecs();
	sleep_nsecs(NSEC_PER_MSEC);
	T1 = get_nsecs();

	printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
}

static int
replay_wakeup_event(struct perf_sched *sched,
		    struct perf_evsel *evsel, struct perf_sample *sample,
		    struct machine *machine __maybe_unused)
{
	const char *comm = perf_evsel__strval(evsel, sample, "comm");
	const u32 pid	 = perf_evsel__intval(evsel, sample, "pid");
	struct task_desc *waker, *wakee;

	if (verbose) {
		printf("sched_wakeup event %p\n", evsel);

		printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid);
	}

	waker = register_pid(sched, sample->tid, "<unknown>");
	wakee = register_pid(sched, pid, comm);

	add_sched_event_wakeup(sched, waker, sample->time, wakee);
	return 0;
}

static int replay_switch_event(struct perf_sched *sched,
			       struct perf_evsel *evsel,
			       struct perf_sample *sample,
			       struct machine *machine __maybe_unused)
{
	const char *prev_comm  = perf_evsel__strval(evsel, sample, "prev_comm"),
		   *next_comm  = perf_evsel__strval(evsel, sample, "next_comm");
	const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
		  next_pid = perf_evsel__intval(evsel, sample, "next_pid");
	const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
	struct task_desc *prev, __maybe_unused *next;
	u64 timestamp0, timestamp = sample->time;
	int cpu = sample->cpu;
	s64 delta;

	if (verbose)
		printf("sched_switch event %p\n", evsel);

	if (cpu >= MAX_CPUS || cpu < 0)
		return 0;

	timestamp0 = sched->cpu_last_switched[cpu];
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRId64 " < 0 ?\n", delta);
		return -1;
	}

	pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
		 prev_comm, prev_pid, next_comm, next_pid, delta);

	prev = register_pid(sched, prev_pid, prev_comm);
	next = register_pid(sched, next_pid, next_comm);

	sched->cpu_last_switched[cpu] = timestamp;

	add_sched_event_run(sched, prev, timestamp, delta);
	add_sched_event_sleep(sched, prev, timestamp, prev_state);

	return 0;
}

static int replay_fork_event(struct perf_sched *sched,
			     union perf_event *event,
			     struct machine *machine)
{
	struct thread *child, *parent;

	child = machine__findnew_thread(machine, event->fork.pid,
					event->fork.tid);
	parent = machine__findnew_thread(machine, event->fork.ppid,
					 event->fork.ptid);

	if (child == NULL || parent == NULL) {
		pr_debug("thread does not exist on fork event: child %p, parent %p\n",
				 child, parent);
		goto out_put;
	}

	if (verbose) {
		printf("fork event\n");
		printf("... parent: %s/%d\n", thread__comm_str(parent), parent->tid);
		printf("...  child: %s/%d\n", thread__comm_str(child), child->tid);
	}

	register_pid(sched, parent->tid, thread__comm_str(parent));
	register_pid(sched, child->tid, thread__comm_str(child));
out_put:
	thread__put(child);
	thread__put(parent);
	return 0;
}

struct sort_dimension {
	const char		*name;
	sort_fn_t		cmp;
	struct list_head	list;
};

static int
thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
{
	struct sort_dimension *sort;
	int ret = 0;

	BUG_ON(list_empty(list));

	list_for_each_entry(sort, list, list) {
		ret = sort->cmp(l, r);
		if (ret)
			return ret;
	}

	return ret;
}

static struct work_atoms *
thread_atoms_search(struct rb_root *root, struct thread *thread,
			 struct list_head *sort_list)
{
	struct rb_node *node = root->rb_node;
	struct work_atoms key = { .thread = thread };

	while (node) {
		struct work_atoms *atoms;
		int cmp;

		atoms = container_of(node, struct work_atoms, node);

		cmp = thread_lat_cmp(sort_list, &key, atoms);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else {
			BUG_ON(thread != atoms->thread);
			return atoms;
		}
	}
	return NULL;
}
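
/*
 * work_atoms nodes live in an rbtree ordered by thread_lat_cmp() over
 * the active sort keys, so insertion keeps the latency report sorted.
 */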

static void
__thread_latency_insert(struct rb_root *root, struct work_atoms *data,
			 struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct work_atoms *this;
		int cmp;

		this = container_of(*new, struct work_atoms, node);
		parent = *new;

		cmp = thread_lat_cmp(sort_list, data, this);

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}

static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
{
	struct work_atoms *atoms = zalloc(sizeof(*atoms));
	if (!atoms) {
		pr_err("No memory at %s\n", __func__);
		return -1;
	}

	atoms->thread = thread__get(thread);
	INIT_LIST_HEAD(&atoms->work_list);
	__thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid);
	return 0;
}

static char sched_out_state(u64 prev_state)
{
	const char *str = TASK_STATE_TO_CHAR_STR;

	return str[prev_state];
}

static int
add_sched_out_event(struct work_atoms *atoms,
		    char run_state,
		    u64 timestamp)
{
	struct work_atom *atom = zalloc(sizeof(*atom));
	if (!atom) {
		pr_err("No memory at %s", __func__);
		return -1;
	}

	atom->sched_out_time = timestamp;

	if (run_state == 'R') {
		atom->state = THREAD_WAIT_CPU;
		atom->wake_up_time = atom->sched_out_time;
	}

	list_add_tail(&atom->list, &atoms->work_list);
	return 0;
}

static void
add_runtime_event(struct work_atoms *atoms, u64 delta,
		  u64 timestamp __maybe_unused)
{
	struct work_atom *atom;

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	atom->runtime += delta;
	atoms->total_runtime += delta;
}
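
/*
 * A schedule-in completes a wait atom: the latency is sched_in_time -
 * wake_up_time.  A wakeup timestamp that lies in the future relative
 * to this switch marks the atom THREAD_IGNORE instead of producing a
 * negative latency.
 */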

static void
add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
{
	struct work_atom *atom;
	u64 delta;

	if (list_empty(&atoms->work_list))
		return;

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	if (atom->state != THREAD_WAIT_CPU)
		return;

	if (timestamp < atom->wake_up_time) {
		atom->state = THREAD_IGNORE;
		return;
	}

	atom->state = THREAD_SCHED_IN;
	atom->sched_in_time = timestamp;

	delta = atom->sched_in_time - atom->wake_up_time;
	atoms->total_lat += delta;
	if (delta > atoms->max_lat) {
		atoms->max_lat = delta;
		atoms->max_lat_at = timestamp;
	}
	atoms->nb_atoms++;
}

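/*
 * 'perf sched latency' handlers: each sched_switch closes a run period
 * for the task going out and, via add_sched_in_event(), completes the
 * pending wait atom of the task coming in.
 */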
static int latency_switch_event(struct perf_sched *sched,
				struct perf_evsel *evsel,
				struct perf_sample *sample,
				struct machine *machine)
{
	const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
		  next_pid = perf_evsel__intval(evsel, sample, "next_pid");
	const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
	struct work_atoms *out_events, *in_events;
	struct thread *sched_out, *sched_in;
	u64 timestamp0, timestamp = sample->time;
	int cpu = sample->cpu, err = -1;
	s64 delta;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);

	timestamp0 = sched->cpu_last_switched[cpu];
	sched->cpu_last_switched[cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRId64 " < 0 ?\n", delta);
		return -1;
	}

	sched_out = machine__findnew_thread(machine, -1, prev_pid);
	sched_in = machine__findnew_thread(machine, -1, next_pid);
	if (sched_out == NULL || sched_in == NULL)
		goto out_put;

	out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
	if (!out_events) {
		if (thread_atoms_insert(sched, sched_out))
			goto out_put;
		out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
		if (!out_events) {
			pr_err("out-event: Internal tree error");
			goto out_put;
		}
	}
	if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp))
		return -1;

	in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
	if (!in_events) {
		if (thread_atoms_insert(sched, sched_in))
			goto out_put;
		in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
		if (!in_events) {
			pr_err("in-event: Internal tree error");
			goto out_put;
		}
		/*
		 * Task came in that we have not heard about yet,
		 * add in an initial atom in runnable state:
		 */
		if (add_sched_out_event(in_events, 'R', timestamp))
			goto out_put;
	}
	add_sched_in_event(in_events, timestamp);
	err = 0;
out_put:
	thread__put(sched_out);
	thread__put(sched_in);
	return err;
}

static int latency_runtime_event(struct perf_sched *sched,
				 struct perf_evsel *evsel,
				 struct perf_sample *sample,
				 struct machine *machine)
{
	const u32 pid	   = perf_evsel__intval(evsel, sample, "pid");
	const u64 runtime  = perf_evsel__intval(evsel, sample, "runtime");
	struct thread *thread = machine__findnew_thread(machine, -1, pid);
	struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
	u64 timestamp = sample->time;
	int cpu = sample->cpu, err = -1;

	if (thread == NULL)
		return -1;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);
	if (!atoms) {
		if (thread_atoms_insert(sched, thread))
			goto out_put;
		atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
		if (!atoms) {
			pr_err("in-event: Internal tree error");
			goto out_put;
		}
		if (add_sched_out_event(atoms, 'R', timestamp))
			goto out_put;
	}

	add_runtime_event(atoms, runtime, timestamp);
	err = 0;
out_put:
	thread__put(thread);
	return err;
}

static int latency_wakeup_event(struct perf_sched *sched,
				struct perf_evsel *evsel,
				struct perf_sample *sample,
				struct machine *machine)
{
	const u32 pid	  = perf_evsel__intval(evsel, sample, "pid");
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *wakee;
	u64 timestamp = sample->time;
	int err = -1;

	wakee = machine__findnew_thread(machine, -1, pid);
	if (wakee == NULL)
		return -1;
	atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
	if (!atoms) {
		if (thread_atoms_insert(sched, wakee))
			goto out_put;
		atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
		if (!atoms) {
			pr_err("wakeup-event: Internal tree error");
			goto out_put;
		}
		if (add_sched_out_event(atoms, 'S', timestamp))
			goto out_put;
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	/*
	 * A wakeup is not guaranteed to arrive while the task is off the
	 * run queue: it can also hit a task that is still on the run
	 * queue, where it merely sets ->state to TASK_RUNNING.  In that
	 * case we must not overwrite ->wake_up_time.
	 *
	 * You WILL be missing events if you've recorded only
	 * one CPU, or are looking at only one, so don't
	 * skip in this case.
	 */
	if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)
		goto out_ok;

	sched->nr_timestamps++;
	if (atom->sched_out_time > timestamp) {
		sched->nr_unordered_timestamps++;
		goto out_ok;
	}

	atom->state = THREAD_WAIT_CPU;
	atom->wake_up_time = timestamp;
out_ok:
	err = 0;
out_put:
	thread__put(wakee);
	return err;
}

static int latency_migrate_task_event(struct perf_sched *sched,
				      struct perf_evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)
{
	const u32 pid = perf_evsel__intval(evsel, sample, "pid");
	u64 timestamp = sample->time;
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *migrant;
	int err = -1;

	/*
	 * Only need to worry about migration when profiling one CPU.
	 */
	if (sched->profile_cpu == -1)
		return 0;

	migrant = machine__findnew_thread(machine, -1, pid);
	if (migrant == NULL)
		return -1;
	atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
	if (!atoms) {
		if (thread_atoms_insert(sched, migrant))
			goto out_put;
		register_pid(sched, migrant->tid, thread__comm_str(migrant));
		atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
		if (!atoms) {
			pr_err("migration-event: Internal tree error");
			goto out_put;
		}
		if (add_sched_out_event(atoms, 'R', timestamp))
			goto out_put;
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
	atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;

	sched->nr_timestamps++;

	if (atom->sched_out_time > timestamp)
		sched->nr_unordered_timestamps++;
	err = 0;
out_put:
	thread__put(migrant);
	return err;
}

static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list)
{
	int i;
	int ret;
	u64 avg;
	char max_lat_at[32];

	if (!work_list->nb_atoms)
		return;
	/*
	 * Ignore idle threads:
	 */
	if (!strcmp(thread__comm_str(work_list->thread), "swapper"))
		return;

	sched->all_runtime += work_list->total_runtime;
	sched->all_count   += work_list->nb_atoms;

	if (work_list->num_merged > 1)
		ret = printf("  %s:(%d) ", thread__comm_str(work_list->thread), work_list->num_merged);
	else
		ret = printf("  %s:%d ", thread__comm_str(work_list->thread), work_list->thread->tid);

	for (i = 0; i < 24 - ret; i++)
		printf(" ");

	avg = work_list->total_lat / work_list->nb_atoms;
	timestamp__scnprintf_usec(work_list->max_lat_at, max_lat_at, sizeof(max_lat_at));

	printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %13s s\n",
	      (double)work_list->total_runtime / NSEC_PER_MSEC,
		 work_list->nb_atoms, (double)avg / NSEC_PER_MSEC,
		 (double)work_list->max_lat / NSEC_PER_MSEC,
		 max_lat_at);
}

static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->thread == r->thread)
		return 0;
	if (l->thread->tid < r->thread->tid)
		return -1;
	if (l->thread->tid > r->thread->tid)
		return 1;
	return (int)(l->thread - r->thread);
}

static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
{
	u64 avgl, avgr;

	if (!l->nb_atoms)
		return -1;

	if (!r->nb_atoms)
		return 1;

	avgl = l->total_lat / l->nb_atoms;
	avgr = r->total_lat / r->nb_atoms;

	if (avgl < avgr)
		return -1;
	if (avgl > avgr)
		return 1;

	return 0;
}

static int max_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->max_lat < r->max_lat)
		return -1;
	if (l->max_lat > r->max_lat)
		return 1;

	return 0;
}

static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->nb_atoms < r->nb_atoms)
		return -1;
	if (l->nb_atoms > r->nb_atoms)
		return 1;

	return 0;
}

static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->total_runtime < r->total_runtime)
		return -1;
	if (l->total_runtime > r->total_runtime)
		return 1;

	return 0;
}

static int sort_dimension__add(const char *tok, struct list_head *list)
{
	size_t i;
	static struct sort_dimension avg_sort_dimension = {
		.name = "avg",
		.cmp  = avg_cmp,
	};
	static struct sort_dimension max_sort_dimension = {
		.name = "max",
		.cmp  = max_cmp,
	};
	static struct sort_dimension pid_sort_dimension = {
		.name = "pid",
		.cmp  = pid_cmp,
	};
	static struct sort_dimension runtime_sort_dimension = {
		.name = "runtime",
		.cmp  = runtime_cmp,
	};
	static struct sort_dimension switch_sort_dimension = {
		.name = "switch",
		.cmp  = switch_cmp,
	};
	struct sort_dimension *available_sorts[] = {
		&pid_sort_dimension,
		&avg_sort_dimension,
		&max_sort_dimension,
		&switch_sort_dimension,
		&runtime_sort_dimension,
	};
	for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
		if (!strcmp(available_sorts[i]->name, tok)) {
			list_add_tail(&available_sorts[i]->list, list);

			return 0;
		}
	}

	return -1;
}
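
/*
 * Drain atom_root (and merged_atom_root) and re-insert every entry
 * into sorted_atom_root using the user-selected sort keys.
 */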

static void perf_sched__sort_lat(struct perf_sched *sched)
{
	struct rb_node *node;
	struct rb_root *root = &sched->atom_root;
again:
	for (;;) {
		struct work_atoms *data;
		node = rb_first(root);
		if (!node)
			break;

		rb_erase(node, root);
		data = rb_entry(node, struct work_atoms, node);
		__thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
	}
	if (root == &sched->atom_root) {
		root = &sched->merged_atom_root;
		goto again;
	}
}

static int process_sched_wakeup_event(struct perf_tool *tool,
				      struct perf_evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	if (sched->tp_handler->wakeup_event)
		return sched->tp_handler->wakeup_event(sched, evsel, sample, machine);

	return 0;
}

union map_priv {
	void	*ptr;
	bool	 color;
};

static bool thread__has_color(struct thread *thread)
{
	union map_priv priv = {
		.ptr = thread__priv(thread),
	};

	return priv.color;
}

static struct thread*
map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid, pid_t tid)
{
	struct thread *thread = machine__findnew_thread(machine, pid, tid);
	union map_priv priv = {
		.color = false,
	};

	if (!sched->map.color_pids || !thread || thread__priv(thread))
		return thread;

	if (thread_map__has(sched->map.color_pids, tid))
		priv.color = true;

	thread__set_priv(thread, priv.ptr);
	return thread;
}
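
/*
 * 'perf sched map' view: every task is assigned a two-character
 * shortname (A0..Z9, '.' for idle) and each sched_switch prints one
 * row showing what runs on every CPU, with '*' marking the CPU that
 * just switched.
 */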

static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
			    struct perf_sample *sample, struct machine *machine)
{
	const u32 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
	struct thread *sched_in;
	int new_shortname;
	u64 timestamp0, timestamp = sample->time;
	s64 delta;
	int i, this_cpu = sample->cpu;
	int cpus_nr;
	bool new_cpu = false;
	const char *color = PERF_COLOR_NORMAL;
	char stimestamp[32];

	BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);

	if (this_cpu > sched->max_cpu)
		sched->max_cpu = this_cpu;

	if (sched->map.comp) {
		cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS);
		if (!test_and_set_bit(this_cpu, sched->map.comp_cpus_mask)) {
			sched->map.comp_cpus[cpus_nr++] = this_cpu;
			new_cpu = true;
		}
	} else
		cpus_nr = sched->max_cpu;

	timestamp0 = sched->cpu_last_switched[this_cpu];
	sched->cpu_last_switched[this_cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRId64 " < 0 ?\n", delta);
		return -1;
	}

	sched_in = map__findnew_thread(sched, machine, -1, next_pid);
	if (sched_in == NULL)
		return -1;

	sched->curr_thread[this_cpu] = thread__get(sched_in);

	printf("  ");

	new_shortname = 0;
	if (!sched_in->shortname[0]) {
		if (!strcmp(thread__comm_str(sched_in), "swapper")) {
			/*
			 * Don't allocate a letter-number for swapper:0
			 * as a shortname. Instead, we use '.' for it.
			 */
			sched_in->shortname[0] = '.';
			sched_in->shortname[1] = ' ';
		} else {
			sched_in->shortname[0] = sched->next_shortname1;
			sched_in->shortname[1] = sched->next_shortname2;

			if (sched->next_shortname1 < 'Z') {
				sched->next_shortname1++;
			} else {
				sched->next_shortname1 = 'A';
				if (sched->next_shortname2 < '9')
					sched->next_shortname2++;
				else
					sched->next_shortname2 = '0';
			}
		}
		new_shortname = 1;
	}

	for (i = 0; i < cpus_nr; i++) {
		int cpu = sched->map.comp ? sched->map.comp_cpus[i] : i;
		struct thread *curr_thread = sched->curr_thread[cpu];
		const char *pid_color = color;
		const char *cpu_color = color;

		if (curr_thread && thread__has_color(curr_thread))
			pid_color = COLOR_PIDS;

		if (sched->map.cpus && !cpu_map__has(sched->map.cpus, cpu))
			continue;

		if (sched->map.color_cpus && cpu_map__has(sched->map.color_cpus, cpu))
			cpu_color = COLOR_CPUS;

		if (cpu != this_cpu)
			color_fprintf(stdout, color, " ");
		else
			color_fprintf(stdout, cpu_color, "*");

		if (sched->curr_thread[cpu])
			color_fprintf(stdout, pid_color, "%2s ", sched->curr_thread[cpu]->shortname);
		else
			color_fprintf(stdout, color, "   ");
	}

	if (sched->map.cpus && !cpu_map__has(sched->map.cpus, this_cpu))
		goto out;

	timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp));
	color_fprintf(stdout, color, "  %12s secs ", stimestamp);
	if (new_shortname || (verbose && sched_in->tid)) {
		const char *pid_color = color;

		if (thread__has_color(sched_in))
			pid_color = COLOR_PIDS;

		color_fprintf(stdout, pid_color, "%s => %s:%d",
		       sched_in->shortname, thread__comm_str(sched_in), sched_in->tid);
	}

	if (sched->map.comp && new_cpu)
		color_fprintf(stdout, color, " (CPU %d)", this_cpu);

out:
	color_fprintf(stdout, color, "\n");

	thread__put(sched_in);

	return 0;
}

static int process_sched_switch_event(struct perf_tool *tool,
				      struct perf_evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
	int this_cpu = sample->cpu, err = 0;
	u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
	    next_pid = perf_evsel__intval(evsel, sample, "next_pid");

	if (sched->curr_pid[this_cpu] != (u32)-1) {
		/*
		 * Are we trying to switch away a PID that is
		 * not current?
		 */
		if (sched->curr_pid[this_cpu] != prev_pid)
			sched->nr_context_switch_bugs++;
	}

	if (sched->tp_handler->switch_event)
		err = sched->tp_handler->switch_event(sched, evsel, sample, machine);

	sched->curr_pid[this_cpu] = next_pid;
	return err;
}

static int process_sched_runtime_event(struct perf_tool *tool,
				       struct perf_evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	if (sched->tp_handler->runtime_event)
		return sched->tp_handler->runtime_event(sched, evsel, sample, machine);

	return 0;
}

static int perf_sched__process_fork_event(struct perf_tool *tool,
					  union perf_event *event,
					  struct perf_sample *sample,
					  struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	/* run the fork event through the perf machinery */
	perf_event__process_fork(tool, event, sample, machine);

	/* and then run additional processing needed for this command */
	if (sched->tp_handler->fork_event)
		return sched->tp_handler->fork_event(sched, event, machine);

	return 0;
}

static int process_sched_migrate_task_event(struct perf_tool *tool,
					    struct perf_evsel *evsel,
					    struct perf_sample *sample,
					    struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	if (sched->tp_handler->migrate_task_event)
		return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine);

	return 0;
}

typedef int (*tracepoint_handler)(struct perf_tool *tool,
				  struct perf_evsel *evsel,
				  struct perf_sample *sample,
				  struct machine *machine);

static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused,
						 union perf_event *event __maybe_unused,
						 struct perf_sample *sample,
						 struct perf_evsel *evsel,
						 struct machine *machine)
{
	int err = 0;

	if (evsel->handler != NULL) {
		tracepoint_handler f = evsel->handler;
		err = f(tool, evsel, sample, machine);
	}

	return err;
}

static int perf_sched__read_events(struct perf_sched *sched)
{
	const struct perf_evsel_str_handler handlers[] = {
		{ "sched:sched_switch",	      process_sched_switch_event, },
		{ "sched:sched_stat_runtime", process_sched_runtime_event, },
		{ "sched:sched_wakeup",	      process_sched_wakeup_event, },
		{ "sched:sched_wakeup_new",   process_sched_wakeup_event, },
		{ "sched:sched_migrate_task", process_sched_migrate_task_event, },
	};
	struct perf_session *session;
	struct perf_data_file file = {
		.path = input_name,
		.mode = PERF_DATA_MODE_READ,
		.force = sched->force,
	};
	int rc = -1;

	session = perf_session__new(&file, false, &sched->tool);
	if (session == NULL) {
		pr_debug("No Memory for session\n");
		return -1;
	}

	symbol__init(&session->header.env);

	if (perf_session__set_tracepoints_handlers(session, handlers))
		goto out_delete;

	if (perf_session__has_traces(session, "record -R")) {
		int err = perf_session__process_events(session);
		if (err) {
			pr_err("Failed to process events, error %d", err);
			goto out_delete;
		}

		sched->nr_events      = session->evlist->stats.nr_events[0];
		sched->nr_lost_events = session->evlist->stats.total_lost;
		sched->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
	}

	rc = 0;
out_delete:
	perf_session__delete(session);
	return rc;
}

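/*
 * What follows supports 'perf sched timehist': per-event wait, sched
 * delay and run times rather than the aggregated latency report.
 */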
/*
 * scheduling times are printed as msec.usec
 */
static inline void print_sched_time(unsigned long long nsecs, int width)
{
	unsigned long msecs;
	unsigned long usecs;

	msecs  = nsecs / NSEC_PER_MSEC;
	nsecs -= msecs * NSEC_PER_MSEC;
	usecs  = nsecs / NSEC_PER_USEC;
	printf("%*lu.%03lu ", width, msecs, usecs);
}

/*
 * returns runtime data for event, allocating memory for it the
 * first time it is used.
 */
static struct evsel_runtime *perf_evsel__get_runtime(struct perf_evsel *evsel)
{
	struct evsel_runtime *r = evsel->priv;

	if (r == NULL) {
		r = zalloc(sizeof(struct evsel_runtime));
		evsel->priv = r;
	}

	return r;
}

/*
 * save last time event was seen per cpu
 */
static void perf_evsel__save_time(struct perf_evsel *evsel,
				  u64 timestamp, u32 cpu)
{
	struct evsel_runtime *r = perf_evsel__get_runtime(evsel);

	if (r == NULL)
		return;

	if ((cpu >= r->ncpu) || (r->last_time == NULL)) {
		int i, n = __roundup_pow_of_two(cpu+1);
		void *p = r->last_time;

		p = realloc(r->last_time, n * sizeof(u64));
		if (!p)
			return;

		r->last_time = p;
		for (i = r->ncpu; i < n; ++i)
			r->last_time[i] = (u64) 0;

		r->ncpu = n;
	}

	r->last_time[cpu] = timestamp;
}

/* returns last time this event was seen on the given cpu */
static u64 perf_evsel__get_time(struct perf_evsel *evsel, u32 cpu)
{
	struct evsel_runtime *r = perf_evsel__get_runtime(evsel);

	if ((r == NULL) || (r->last_time == NULL) || (cpu >= r->ncpu))
		return 0;

	return r->last_time[cpu];
}

static int comm_width = 20;

static char *timehist_get_commstr(struct thread *thread)
{
	static char str[32];
	const char *comm = thread__comm_str(thread);
	pid_t tid = thread->tid;
	pid_t pid = thread->pid_;
	int n;

	if (pid == 0)
		n = scnprintf(str, sizeof(str), "%s", comm);

	else if (tid != pid)
		n = scnprintf(str, sizeof(str), "%s[%d/%d]", comm, tid, pid);

	else
		n = scnprintf(str, sizeof(str), "%s[%d]", comm, tid);

	if (n > comm_width)
		comm_width = n;

	return str;
}

static void timehist_header(void)
{
	printf("%15s %6s ", "time", "cpu");

	printf(" %-20s  %9s  %9s  %9s",
		"task name", "wait time", "sch delay", "run time");

	printf("\n");

	/*
	 * units row
	 */
	printf("%15s %-6s ", "", "");

	printf(" %-20s  %9s  %9s  %9s\n", "[tid/pid]", "(msec)", "(msec)", "(msec)");

	/*
	 * separator
	 */
	printf("%.15s %.6s ", graph_dotted_line, graph_dotted_line);

	printf(" %.20s  %.9s  %.9s  %.9s",
		graph_dotted_line, graph_dotted_line, graph_dotted_line,
		graph_dotted_line);

	printf("\n");
}
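
/*
 * illustrative output line:
 *    79371.874569 [0011]  gcc[31949]     0.014      0.000      1.148
 */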

static void timehist_print_sample(struct perf_sample *sample,
				  struct thread *thread)
{
	struct thread_runtime *tr = thread__priv(thread);
	char tstr[64];

	timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
	printf("%15s [%04d] ", tstr, sample->cpu);

	printf(" %-*s ", comm_width, timehist_get_commstr(thread));

	print_sched_time(tr->dt_wait, 6);
	print_sched_time(tr->dt_delay, 6);
	print_sched_time(tr->dt_run, 6);
	printf("\n");
}

/*
 * Explanation of delta-time stats:
 *
 *            t = time of current schedule out event
 *        tprev = time of previous sched out event
 *                also time of schedule-in event for current task
 *    last_time = time of last sched change event for current task
 *                (i.e., time the task was last scheduled out)
 * ready_to_run = time of wakeup for current task
 *
 * -----|------------|------------|------------|------
 *    last         ready        tprev          t
 *    time         to run
 *
 *      |-------- dt_wait --------|
 *                   |- dt_delay -|-- dt_run --|
 *
 *   dt_run = run time of current task
 *  dt_wait = time between last schedule out event for task and tprev
 *            represents time spent off the cpu
 * dt_delay = time between wakeup and schedule-in of task
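 *
 * e.g. (all times in nsec) last_time = 100, ready_to_run = 250,
 *      tprev = 300, t = 340 gives dt_wait = 200, dt_delay = 50, dt_run = 40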
 */

static void timehist_update_runtime_stats(struct thread_runtime *r,
					 u64 t, u64 tprev)
{
	r->dt_delay   = 0;
	r->dt_wait    = 0;
	r->dt_run     = 0;
	if (tprev) {
		r->dt_run = t - tprev;
		if (r->ready_to_run) {
			if (r->ready_to_run > tprev)
				pr_debug("time travel: wakeup time for task > previous sched_switch event\n");
			else
				r->dt_delay = tprev - r->ready_to_run;
		}

		if (r->last_time > tprev)
			pr_debug("time travel: last sched out time for task > previous sched_switch event\n");
		else if (r->last_time)
			r->dt_wait = tprev - r->last_time;
	}

	update_stats(&r->run_stats, r->dt_run);
	r->total_run_time += r->dt_run;
}

static bool is_idle_sample(struct perf_sample *sample,
			   struct perf_evsel *evsel)
{
	/* pid 0 == swapper == idle task */
	if (sample->pid == 0)
		return true;

	if (strcmp(perf_evsel__name(evsel), "sched:sched_switch") == 0) {
		if (perf_evsel__intval(evsel, sample, "prev_pid") == 0)
			return true;
	}
	return false;
}

/*
 * Track idle stats per cpu by maintaining a local thread
 * struct for the idle task on each cpu.
 */
static int init_idle_threads(int ncpu)
{
	int i;

	idle_threads = zalloc(ncpu * sizeof(struct thread *));
	if (!idle_threads)
		return -ENOMEM;

	idle_max_cpu = ncpu;

	/* allocate the actual thread struct if needed */
	for (i = 0; i < ncpu; ++i) {
		idle_threads[i] = thread__new(0, 0);
		if (idle_threads[i] == NULL)
			return -ENOMEM;

		thread__set_comm(idle_threads[i], idle_comm, 0);
	}

	return 0;
}

static void free_idle_threads(void)
{
	int i;

	if (idle_threads == NULL)
		return;

	for (i = 0; i < idle_max_cpu; ++i) {
		if (idle_threads[i])
			thread__delete(idle_threads[i]);
	}

	free(idle_threads);
}

static struct thread *get_idle_thread(int cpu)
{
	/*
	 * expand/allocate array of pointers to local thread
	 * structs if needed
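	 * (idle_max_cpu tracks the number of allocated slots)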
	 */
	if ((cpu >= idle_max_cpu) || (idle_threads == NULL)) {
		int i, j = __roundup_pow_of_two(cpu+1);
		void *p;

		p = realloc(idle_threads, j * sizeof(struct thread *));
		if (!p)
			return NULL;

		idle_threads = (struct thread **) p;
		for (i = idle_max_cpu; i < j; ++i)
			idle_threads[i] = NULL;

		idle_max_cpu = j;
	}

	/* allocate a new thread struct if needed */
	if (idle_threads[cpu] == NULL) {
		idle_threads[cpu] = thread__new(0, 0);

		if (idle_threads[cpu]) {
			idle_threads[cpu]->tid = 0;
			thread__set_comm(idle_threads[cpu], idle_comm, 0);
		}
	}

	return idle_threads[cpu];
}

/*
 * handle runtime stats saved per thread
 */
static struct thread_runtime *thread__init_runtime(struct thread *thread)
{
	struct thread_runtime *r;

	r = zalloc(sizeof(struct thread_runtime));
	if (!r)
		return NULL;

	init_stats(&r->run_stats);
	thread__set_priv(thread, r);

	return r;
}

static struct thread_runtime *thread__get_runtime(struct thread *thread)
{
	struct thread_runtime *tr;

	tr = thread__priv(thread);
	if (tr == NULL) {
		tr = thread__init_runtime(thread);
		if (tr == NULL)
			pr_debug("Failed to malloc memory for runtime data.\n");
	}

	return tr;
}

static struct thread *timehist_get_thread(struct perf_sample *sample,
					  struct machine *machine,
					  struct perf_evsel *evsel)
{
	struct thread *thread;

	if (is_idle_sample(sample, evsel)) {
		thread = get_idle_thread(sample->cpu);
		if (thread == NULL)
			pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);

	} else {
		thread = machine__findnew_thread(machine, sample->pid, sample->tid);
		if (thread == NULL) {
			pr_debug("Failed to get thread for tid %d. skipping sample.\n",
				 sample->tid);
		}
	}

	return thread;
}

static bool timehist_skip_sample(struct thread *thread)
{
	bool rc = false;

	if (thread__is_filtered(thread))
		rc = true;

	return rc;
}

static int timehist_sched_wakeup_event(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct perf_evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct thread *thread;
	struct thread_runtime *tr = NULL;
	/* want pid of awakened task not pid in sample */
	const u32 pid = perf_evsel__intval(evsel, sample, "pid");

	thread = machine__findnew_thread(machine, 0, pid);
	if (thread == NULL)
		return -1;

	tr = thread__get_runtime(thread);
	if (tr == NULL)
		return -1;

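	/* record only the first wakeup; ready_to_run is reset at the next sched switch */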
	if (tr->ready_to_run == 0)
		tr->ready_to_run = sample->time;

	return 0;
}

static int timehist_sched_change_event(struct perf_tool *tool __maybe_unused,
				       union perf_event *event,
				       struct perf_evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct addr_location al;
	struct thread *thread;
	struct thread_runtime *tr = NULL;
	u64 tprev;
	int rc = 0;

	if (machine__resolve(machine, &al, sample) < 0) {
		pr_err("problem processing %d event. skipping it\n",
		       event->header.type);
		rc = -1;
		goto out;
	}

	thread = timehist_get_thread(sample, machine, evsel);
	if (thread == NULL) {
		rc = -1;
		goto out;
	}

	if (timehist_skip_sample(thread))
		goto out;

	tr = thread__get_runtime(thread);
	if (tr == NULL) {
		rc = -1;
		goto out;
	}

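	/* previous sched_switch on this cpu, i.e. when the current task was scheduled in */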
	tprev = perf_evsel__get_time(evsel, sample->cpu);

	timehist_update_runtime_stats(tr, sample->time, tprev);
	timehist_print_sample(sample, thread);

out:
	if (tr) {
		/* time of this sched_switch event becomes last time task seen */
		tr->last_time = sample->time;

		/* sched out event for task so reset ready to run time */
		tr->ready_to_run = 0;
	}

	perf_evsel__save_time(evsel, sample->time, sample->cpu);

	return rc;
}

static int timehist_sched_switch_event(struct perf_tool *tool,
			     union perf_event *event,
			     struct perf_evsel *evsel,
			     struct perf_sample *sample,
			     struct machine *machine __maybe_unused)
{
	return timehist_sched_change_event(tool, event, evsel, sample, machine);
}

static int process_lost(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine __maybe_unused)
{
	char tstr[64];

	timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
	printf("%15s ", tstr);
	printf("lost %" PRIu64 " events on cpu %d\n", event->lost.lost, sample->cpu);

	return 0;
}


typedef int (*sched_handler)(struct perf_tool *tool,
			  union perf_event *event,
			  struct perf_evsel *evsel,
			  struct perf_sample *sample,
			  struct machine *machine);

static int perf_timehist__process_sample(struct perf_tool *tool,
					 union perf_event *event,
					 struct perf_sample *sample,
					 struct perf_evsel *evsel,
					 struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
	int err = 0;
	int this_cpu = sample->cpu;

	if (this_cpu > sched->max_cpu)
		sched->max_cpu = this_cpu;

	if (evsel->handler != NULL) {
		sched_handler f = evsel->handler;

		err = f(tool, event, evsel, sample, machine);
	}

	return err;
}

static int perf_sched__timehist(struct perf_sched *sched)
{
	const struct perf_evsel_str_handler handlers[] = {
		{ "sched:sched_switch",       timehist_sched_switch_event, },
		{ "sched:sched_wakeup",	      timehist_sched_wakeup_event, },
		{ "sched:sched_wakeup_new",   timehist_sched_wakeup_event, },
	};
	struct perf_data_file file = {
		.path = input_name,
		.mode = PERF_DATA_MODE_READ,
	};

	struct perf_session *session;
	int err = -1;

	/*
	 * event handlers for timehist option
	 */
	sched->tool.sample	 = perf_timehist__process_sample;
	sched->tool.mmap	 = perf_event__process_mmap;
	sched->tool.comm	 = perf_event__process_comm;
	sched->tool.exit	 = perf_event__process_exit;
	sched->tool.fork	 = perf_event__process_fork;
	sched->tool.lost	 = process_lost;
	sched->tool.attr	 = perf_event__process_attr;
	sched->tool.tracing_data = perf_event__process_tracing_data;
	sched->tool.build_id	 = perf_event__process_build_id;

	sched->tool.ordered_events = true;
	sched->tool.ordering_requires_timestamps = true;

	session = perf_session__new(&file, false, &sched->tool);
	if (session == NULL)
		return -ENOMEM;

	symbol__init(&session->header.env);

	setup_pager();

	/* setup per-evsel handlers */
	if (perf_session__set_tracepoints_handlers(session, handlers))
		goto out;

	if (!perf_session__has_traces(session, "record -R"))
		goto out;

	/* pre-allocate struct for per-CPU idle stats */
	sched->max_cpu = session->header.env.nr_cpus_online;
	if (sched->max_cpu == 0)
		sched->max_cpu = 4;
	if (init_idle_threads(sched->max_cpu))
		goto out;

	timehist_header();

	err = perf_session__process_events(session);
	if (err) {
		pr_err("Failed to process events, error %d", err);
		goto out;
	}

out:
	free_idle_threads();
	perf_session__delete(session);

	return err;
}


static void print_bad_events(struct perf_sched *sched)
{
	if (sched->nr_unordered_timestamps && sched->nr_timestamps) {
		printf("  INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
			(double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0,
			sched->nr_unordered_timestamps, sched->nr_timestamps);
	}
	if (sched->nr_lost_events && sched->nr_events) {
		printf("  INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
			(double)sched->nr_lost_events/(double)sched->nr_events * 100.0,
			sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks);
	}
	if (sched->nr_context_switch_bugs && sched->nr_timestamps) {
		printf("  INFO: %.3f%% context switch bugs (%ld out of %ld)",
			(double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0,
			sched->nr_context_switch_bugs, sched->nr_timestamps);
		if (sched->nr_lost_events)
			printf(" (due to lost events?)");
		printf("\n");
	}
}

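/*
 * insert data into an rbtree keyed by comm; entries for threads sharing
 * a comm are merged (skipped when latency is run with -p/--pids)
 */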
static void __merge_work_atoms(struct rb_root *root, struct work_atoms *data)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct work_atoms *this;
	const char *comm = thread__comm_str(data->thread), *this_comm;

	while (*new) {
		int cmp;

		this = container_of(*new, struct work_atoms, node);
		parent = *new;

		this_comm = thread__comm_str(this->thread);
		cmp = strcmp(comm, this_comm);
		if (cmp > 0) {
			new = &((*new)->rb_left);
		} else if (cmp < 0) {
			new = &((*new)->rb_right);
		} else {
			this->num_merged++;
			this->total_runtime += data->total_runtime;
			this->nb_atoms += data->nb_atoms;
			this->total_lat += data->total_lat;
			list_splice(&data->work_list, &this->work_list);
			if (this->max_lat < data->max_lat) {
				this->max_lat = data->max_lat;
				this->max_lat_at = data->max_lat_at;
			}
			zfree(&data);
			return;
		}
	}

	data->num_merged++;
	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}

static void perf_sched__merge_lat(struct perf_sched *sched)
{
	struct work_atoms *data;
	struct rb_node *node;

	if (sched->skip_merge)
		return;

	while ((node = rb_first(&sched->atom_root))) {
		rb_erase(node, &sched->atom_root);
		data = rb_entry(node, struct work_atoms, node);
		__merge_work_atoms(&sched->merged_atom_root, data);
	}
}

static int perf_sched__lat(struct perf_sched *sched)
{
	struct rb_node *next;

	setup_pager();

	if (perf_sched__read_events(sched))
		return -1;

	perf_sched__merge_lat(sched);
	perf_sched__sort_lat(sched);

	printf("\n -----------------------------------------------------------------------------------------------------------------\n");
	printf("  Task                  |   Runtime ms  | Switches | Average delay ms | Maximum delay ms | Maximum delay at       |\n");
	printf(" -----------------------------------------------------------------------------------------------------------------\n");

	next = rb_first(&sched->sorted_atom_root);

	while (next) {
		struct work_atoms *work_list;

		work_list = rb_entry(next, struct work_atoms, node);
		output_lat_thread(sched, work_list);
		next = rb_next(next);
		thread__zput(work_list->thread);
	}

	printf(" -----------------------------------------------------------------------------------------------------------------\n");
	printf("  TOTAL:                |%11.3f ms |%9" PRIu64 " |\n",
		(double)sched->all_runtime / NSEC_PER_MSEC, sched->all_count);

	printf(" ---------------------------------------------------\n");

	print_bad_events(sched);
	printf("\n");

	return 0;
}

static int setup_map_cpus(struct perf_sched *sched)
{
	struct cpu_map *map;

	sched->max_cpu  = sysconf(_SC_NPROCESSORS_CONF);

	if (sched->map.comp) {
		sched->map.comp_cpus = zalloc(sched->max_cpu * sizeof(int));
		if (!sched->map.comp_cpus)
			return -1;
	}

	if (!sched->map.cpus_str)
		return 0;

	map = cpu_map__new(sched->map.cpus_str);
	if (!map) {
		pr_err("failed to get cpus map from %s\n", sched->map.cpus_str);
		return -1;
	}

	sched->map.cpus = map;
	return 0;
}

static int setup_color_pids(struct perf_sched *sched)
{
	struct thread_map *map;

	if (!sched->map.color_pids_str)
		return 0;

	map = thread_map__new_by_tid_str(sched->map.color_pids_str);
	if (!map) {
		pr_err("failed to get thread map from %s\n", sched->map.color_pids_str);
		return -1;
	}

	sched->map.color_pids = map;
	return 0;
}

static int setup_color_cpus(struct perf_sched *sched)
{
	struct cpu_map *map;

	if (!sched->map.color_cpus_str)
		return 0;

	map = cpu_map__new(sched->map.color_cpus_str);
	if (!map) {
		pr_err("failed to get thread map from %s\n", sched->map.color_cpus_str);
		return -1;
	}

	sched->map.color_cpus = map;
	return 0;
}

static int perf_sched__map(struct perf_sched *sched)
{
	if (setup_map_cpus(sched))
		return -1;

	if (setup_color_pids(sched))
		return -1;

	if (setup_color_cpus(sched))
		return -1;

	setup_pager();
	if (perf_sched__read_events(sched))
		return -1;
	print_bad_events(sched);
	return 0;
}

static int perf_sched__replay(struct perf_sched *sched)
{
	unsigned long i;

	calibrate_run_measurement_overhead(sched);
	calibrate_sleep_measurement_overhead(sched);

	test_calibrations(sched);

	if (perf_sched__read_events(sched))
		return -1;

	printf("nr_run_events:        %ld\n", sched->nr_run_events);
	printf("nr_sleep_events:      %ld\n", sched->nr_sleep_events);
	printf("nr_wakeup_events:     %ld\n", sched->nr_wakeup_events);

	if (sched->targetless_wakeups)
		printf("target-less wakeups:  %ld\n", sched->targetless_wakeups);
	if (sched->multitarget_wakeups)
		printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups);
	if (sched->nr_run_events_optimized)
		printf("run atoms optimized: %ld\n",
			sched->nr_run_events_optimized);

	print_task_traces(sched);
	add_cross_task_wakeups(sched);

	create_tasks(sched);
	printf("------------------------------------------------------------\n");
	for (i = 0; i < sched->replay_repeat; i++)
		run_one_test(sched);

	return 0;
}

static void setup_sorting(struct perf_sched *sched, const struct option *options,
			  const char * const usage_msg[])
{
	char *tmp, *tok, *str = strdup(sched->sort_order);

	for (tok = strtok_r(str, ", ", &tmp);
			tok; tok = strtok_r(NULL, ", ", &tmp)) {
		if (sort_dimension__add(tok, &sched->sort_list) < 0) {
			usage_with_options_msg(usage_msg, options,
					"Unknown --sort key: `%s'", tok);
		}
	}

	free(str);

	sort_dimension__add("pid", &sched->cmp_pid);
}

static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;
	const char * const record_args[] = {
		"record",
		"-a",
		"-R",
		"-m", "1024",
		"-c", "1",
		"-e", "sched:sched_switch",
		"-e", "sched:sched_stat_wait",
		"-e", "sched:sched_stat_sleep",
		"-e", "sched:sched_stat_iowait",
		"-e", "sched:sched_stat_runtime",
		"-e", "sched:sched_process_fork",
		"-e", "sched:sched_wakeup",
		"-e", "sched:sched_wakeup_new",
		"-e", "sched:sched_migrate_task",
	};

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_record(i, rec_argv, NULL);
}

int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
{
	const char default_sort_order[] = "avg, max, switch, runtime";
	struct perf_sched sched = {
		.tool = {
			.sample		 = perf_sched__process_tracepoint_sample,
			.comm		 = perf_event__process_comm,
			.lost		 = perf_event__process_lost,
			.fork		 = perf_sched__process_fork_event,
			.ordered_events = true,
		},
		.cmp_pid	      = LIST_HEAD_INIT(sched.cmp_pid),
		.sort_list	      = LIST_HEAD_INIT(sched.sort_list),
		.start_work_mutex     = PTHREAD_MUTEX_INITIALIZER,
		.work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER,
		.sort_order	      = default_sort_order,
		.replay_repeat	      = 10,
		.profile_cpu	      = -1,
		.next_shortname1      = 'A',
		.next_shortname2      = '0',
		.skip_merge           = 0,
	};
	const struct option sched_options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		    "input file name"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
	};
	const struct option latency_options[] = {
	OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
		   "sort by key(s): runtime, switch, avg, max"),
	OPT_INTEGER('C', "CPU", &sched.profile_cpu,
		    "CPU to profile on"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_BOOLEAN('p', "pids", &sched.skip_merge,
		    "latency stats per pid instead of per comm"),
	OPT_PARENT(sched_options)
	};
	const struct option replay_options[] = {
	OPT_UINTEGER('r', "repeat", &sched.replay_repeat,
		     "repeat the workload replay N times (-1: infinite)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_BOOLEAN('f', "force", &sched.force, "don't complain, do it"),
	OPT_PARENT(sched_options)
	};
	const struct option map_options[] = {
	OPT_BOOLEAN(0, "compact", &sched.map.comp,
		    "map output in compact mode"),
	OPT_STRING(0, "color-pids", &sched.map.color_pids_str, "pids",
		   "highlight given pids in map"),
	OPT_STRING(0, "color-cpus", &sched.map.color_cpus_str, "cpus",
                    "highlight given CPUs in map"),
	OPT_STRING(0, "cpus", &sched.map.cpus_str, "cpus",
                    "display given CPUs in map"),
	OPT_PARENT(sched_options)
	};
	const struct option timehist_options[] = {
	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
		   "file", "kallsyms pathname"),
	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
		    "Look for files with symbols relative to this directory"),
	OPT_PARENT(sched_options)
	};

	const char * const latency_usage[] = {
		"perf sched latency [<options>]",
		NULL
	};
	const char * const replay_usage[] = {
		"perf sched replay [<options>]",
		NULL
	};
	const char * const map_usage[] = {
		"perf sched map [<options>]",
		NULL
	};
	const char * const timehist_usage[] = {
		"perf sched timehist [<options>]",
		NULL
	};
	const char *const sched_subcommands[] = { "record", "latency", "map",
						  "replay", "script",
						  "timehist", NULL };
	const char *sched_usage[] = {
		NULL,
		NULL
	};
	struct trace_sched_handler lat_ops  = {
		.wakeup_event	    = latency_wakeup_event,
		.switch_event	    = latency_switch_event,
		.runtime_event	    = latency_runtime_event,
		.migrate_task_event = latency_migrate_task_event,
	};
	struct trace_sched_handler map_ops  = {
		.switch_event	    = map_switch_event,
	};
	struct trace_sched_handler replay_ops  = {
		.wakeup_event	    = replay_wakeup_event,
		.switch_event	    = replay_switch_event,
		.fork_event	    = replay_fork_event,
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(sched.curr_pid); i++)
		sched.curr_pid[i] = -1;

	argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
					sched_usage, PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(sched_usage, sched_options);

	/*
	 * Aliased to 'perf script' for now:
	 */
	if (!strcmp(argv[0], "script"))
		return cmd_script(argc, argv, prefix);

	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strncmp(argv[0], "lat", 3)) {
		sched.tp_handler = &lat_ops;
		if (argc > 1) {
			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
			if (argc)
				usage_with_options(latency_usage, latency_options);
		}
		setup_sorting(&sched, latency_options, latency_usage);
		return perf_sched__lat(&sched);
	} else if (!strcmp(argv[0], "map")) {
		if (argc) {
			argc = parse_options(argc, argv, map_options, map_usage, 0);
			if (argc)
				usage_with_options(map_usage, map_options);
		}
		sched.tp_handler = &map_ops;
		setup_sorting(&sched, latency_options, latency_usage);
		return perf_sched__map(&sched);
	} else if (!strncmp(argv[0], "rep", 3)) {
		sched.tp_handler = &replay_ops;
		if (argc) {
			argc = parse_options(argc, argv, replay_options, replay_usage, 0);
			if (argc)
				usage_with_options(replay_usage, replay_options);
		}
		return perf_sched__replay(&sched);
	} else if (!strcmp(argv[0], "timehist")) {
		if (argc) {
			argc = parse_options(argc, argv, timehist_options,
					     timehist_usage, 0);
			if (argc)
				usage_with_options(timehist_usage, timehist_options);
		}
		return perf_sched__timehist(&sched);
	} else {
		usage_with_options(sched_usage, sched_options);
	}

	return 0;
}