#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"

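/*
 * Open the input: a pipe on stdin when the filename is "-", otherwise a
 * regular file whose ownership, size and header are sanity checked
 * before the session is accepted.
 */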
static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_session__read_header(self, self->fd) < 0)
			pr_err("incompatible file format");

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		int err = errno;

		pr_err("failed to open %s: %s", self->filename, strerror(err));
		if (err == ENOENT && !strcmp(self->filename, "perf.data"))
			pr_err("  (try 'perf record' first)");
		pr_err("\n");
		return -errno;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_session__read_header(self, self->fd) < 0) {
		pr_err("incompatible file format");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_type(self->evlist)) {
		pr_err("non matching sample_type");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_id_all(self->evlist)) {
		pr_err("non matching sample_id_all");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

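/*
 * Cache the evlist-wide sample layout (sample_type, sample size,
 * sample_id_all and the id header size) in the session so it does not
 * have to be recomputed for every event.
 */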
void perf_session__update_sample_type(struct perf_session *self)
{
	self->sample_type = perf_evlist__sample_type(self->evlist);
	self->sample_size = __perf_evsel__sample_size(self->sample_type);
	self->sample_id_all = perf_evlist__sample_id_all(self->evlist);
	self->id_hdr_size = perf_evlist__id_hdr_size(self->evlist);
	self->host_machine.id_hdr_size = self->id_hdr_size;
}

int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret = machine__create_kernel_maps(&self->host_machine);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&self->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *self)
{
	machine__destroy_kernel_maps(&self->host_machine);
	machines__destroy_guest_kernel_maps(&self->machines);
}

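/*
 * Allocate and set up a session for reading (O_RDONLY) or writing
 * (O_WRONLY) a perf.data file.  A minimal reader, sketched here under
 * the assumption of a report-style tool that has already filled in a
 * 'struct perf_tool tool', would look like:
 *
 *	struct perf_session *session =
 *		perf_session__new("perf.data", O_RDONLY, false, false, &tool);
 *
 *	if (session != NULL) {
 *		perf_session__process_events(session, &tool);
 *		perf_session__delete(session);
 *	}
 */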
struct perf_session *perf_session__new(const char *filename, int mode,
				       bool force, bool repipe,
				       struct perf_tool *tool)
{
	struct perf_session *self;
	struct stat st;
	size_t len;

	if (!filename || !strlen(filename)) {
		if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
			filename = "-";
		else
			filename = "perf.data";
	}

	len = strlen(filename);
	self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	memcpy(self->filename, filename, len);
	/*
	 * On 64bit we can mmap the data file in one go. No need for tiny mmap
	 * slices. On 32bit we use 32MB.
	 */
#if BITS_PER_LONG == 64
	self->mmap_window = ULLONG_MAX;
#else
	self->mmap_window = 32 * 1024 * 1024ULL;
#endif
	self->machines = RB_ROOT;
	self->repipe = repipe;
	INIT_LIST_HEAD(&self->ordered_samples.samples);
	INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
	INIT_LIST_HEAD(&self->ordered_samples.to_free);
	machine__init(&self->host_machine, "", HOST_KERNEL_ID);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
		perf_session__update_sample_type(self);
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	if (tool && tool->ordering_requires_timestamps &&
	    tool->ordered_samples && !self->sample_id_all) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_samples = false;
	}

out:
	return self;
out_delete:
	perf_session__delete(self);
	return NULL;
}

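/* Free the threads that were parked on the dead_threads list. */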
static void machine__delete_dead_threads(struct machine *machine)
{
	struct thread *n, *t;

	list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
		list_del(&t->node);
		thread__delete(t);
	}
}

static void perf_session__delete_dead_threads(struct perf_session *session)
{
	machine__delete_dead_threads(&session->host_machine);
}

static void machine__delete_threads(struct machine *self)
{
	struct rb_node *nd = rb_first(&self->threads);

	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		rb_erase(&t->rb_node, &self->threads);
		nd = rb_next(nd);
		thread__delete(t);
	}
}

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->host_machine);
}

void perf_session__delete(struct perf_session *self)
{
	perf_session__destroy_kernel_maps(self);
	perf_session__delete_dead_threads(self);
	perf_session__delete_threads(self);
	machine__exit(&self->host_machine);
	close(self->fd);
	free(self);
}

void machine__remove_thread(struct machine *self, struct thread *th)
{
	self->last_match = NULL;
	rb_erase(&th->rb_node, &self->threads);
	/*
	 * We may have references to this thread, for instance in some hist_entry
	 * instances, so just move them to a separate list.
	 */
	list_add_tail(&th->node, &self->dead_threads);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return 1;

	return 0;
}

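/*
 * Walk a raw PERF_SAMPLE_CALLCHAIN: track the cpumode switches signalled
 * by PERF_CONTEXT_* markers, resolve each remaining ip to a map/symbol
 * and append it to the evsel's callchain cursor.
 */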
int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel,
			       struct thread *thread,
			       struct ip_callchain *chain,
			       struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	unsigned int i;
	int err;

	callchain_cursor_reset(&evsel->hists.callchain_cursor);

	for (i = 0; i < chain->nr; i++) {
		u64 ip;
		struct addr_location al;

		if (callchain_param.order == ORDER_CALLEE)
			ip = chain->ips[i];
		else
			ip = chain->ips[chain->nr - i - 1];

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;	break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;	break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;	break;
			default:
				break;
			}
			continue;
		}

		al.filtered = false;
		thread__find_addr_location(thread, self, cpumode,
					   MAP__FUNCTION, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
		}

		err = callchain_cursor_append(&evsel->hists.callchain_cursor,
					      ip, al.map, al.sym);
		if (err)
			return err;
	}

	return 0;
}

static int process_event_synth_tracing_data_stub(union perf_event *event __used,
						 struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(union perf_event *event __used,
					 struct perf_evlist **pevlist __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __used,
				     union perf_event *event __used,
				     struct perf_sample *sample __used,
				     struct perf_evsel *evsel __used,
				     struct machine *machine __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __used,
			      union perf_event *event __used,
			      struct perf_sample *sample __used,
			      struct machine *machine __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __used,
				       union perf_event *event __used,
				       struct perf_session *perf_session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_type_stub(struct perf_tool *tool __used,
				   union perf_event *event __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct perf_session *session);

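/*
 * Point every callback the tool left NULL at a matching stub so the
 * event dispatch code never has to check for missing handlers.
 */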
static void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->event_type == NULL)
		tool->event_type = process_event_type_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_finished_round_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_samples)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
}

void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

static void perf_event__all64_swap(union perf_event *event)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);
}

static void perf_event__mmap_swap(union perf_event *event)
{
	event->mmap.pid	  = bswap_32(event->mmap.pid);
	event->mmap.tid	  = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len	  = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);
}

static void perf_event__task_swap(union perf_event *event)
{
	event->fork.pid	 = bswap_32(event->fork.pid);
	event->fork.tid	 = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);
}

static void perf_event__read_swap(union perf_event *event)
{
	event->read.pid		 = bswap_32(event->read.pid);
	event->read.tid		 = bswap_32(event->read.tid);
	event->read.value	 = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id		 = bswap_64(event->read.id);
}

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type		= bswap_32(attr->type);
	attr->size		= bswap_32(attr->size);
	attr->config		= bswap_64(attr->config);
	attr->sample_period	= bswap_64(attr->sample_period);
	attr->sample_type	= bswap_64(attr->sample_type);
	attr->read_format	= bswap_64(attr->read_format);
	attr->wakeup_events	= bswap_32(attr->wakeup_events);
	attr->bp_type		= bswap_32(attr->bp_type);
	attr->bp_addr		= bswap_64(attr->bp_addr);
	attr->bp_len		= bswap_64(attr->bp_len);
}

static void perf_event__hdr_attr_swap(union perf_event *event)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_type_swap(union perf_event *event)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

typedef void (*perf_event__swap_op)(union perf_event *event);

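/*
 * Per-record-type byte-swap handlers, used when the perf.data file was
 * written on a host with the opposite endianness.
 */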
static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
	[PERF_RECORD_FORK]		  = perf_event__task_swap,
	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
	[PERF_RECORD_READ]		  = perf_event__read_swap,
	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};

struct sample_queue {
	u64			timestamp;
	u64			file_offset;
	union perf_event	*event;
	struct list_head	list;
};

static void perf_session_free_sample_buffers(struct perf_session *session)
{
	struct ordered_samples *os = &session->ordered_samples;

	while (!list_empty(&os->to_free)) {
		struct sample_queue *sq;

		sq = list_entry(os->to_free.next, struct sample_queue, list);
		list_del(&sq->list);
		free(sq);
	}
}

static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset);

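/*
 * Deliver every queued event whose timestamp is not above the current
 * flush limit, in timestamp order, and recycle the queue entries onto
 * the sample_cache list.
 */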
static void flush_sample_queue(struct perf_session *s,
			       struct perf_tool *tool)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *head = &os->samples;
	struct sample_queue *tmp, *iter;
	struct perf_sample sample;
	u64 limit = os->next_flush;
	u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
	unsigned idx = 0, progress_next = os->nr_samples / 16;
	int ret;

	if (!tool->ordered_samples || !limit)
		return;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			break;

		ret = perf_session__parse_sample(s, iter->event, &sample);
		if (ret)
			pr_err("Can't parse sample, err = %d\n", ret);
		else
			perf_session_deliver_event(s, iter->event, &sample, tool,
						   iter->file_offset);

		os->last_flush = iter->timestamp;
		list_del(&iter->list);
		list_add(&iter->list, &os->sample_cache);
		if (++idx >= progress_next) {
			progress_next += os->nr_samples / 16;
			ui_progress__update(idx, os->nr_samples,
					    "Processing time ordered events...");
		}
	}

	if (list_empty(head)) {
		os->last_sample = NULL;
	} else if (last_ts <= limit) {
		os->last_sample =
			list_entry(head->prev, struct sample_queue, list);
	}

	os->nr_samples = 0;
}

/*
 * When perf record finishes a pass over every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event __used,
				  struct perf_session *session)
{
	flush_sample_queue(session, tool);
	session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return 0;
}

/* The queue is ordered by time */
static void __queue_event(struct sample_queue *new, struct perf_session *s)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct sample_queue *sample = os->last_sample;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++os->nr_samples;
	os->last_sample = new;

	if (!sample) {
		list_add(&new->list, &os->samples);
		os->max_timestamp = timestamp;
		return;
	}

	/*
	 * last_sample might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (sample->timestamp <= timestamp) {
		while (sample->timestamp <= timestamp) {
			p = sample->list.next;
			if (p == &os->samples) {
				list_add_tail(&new->list, &os->samples);
				os->max_timestamp = timestamp;
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add_tail(&new->list, &sample->list);
	} else {
		while (sample->timestamp > timestamp) {
			p = sample->list.prev;
			if (p == &os->samples) {
				list_add(&new->list, &os->samples);
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add(&new->list, &sample->list);
	}
}

#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct sample_queue))

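/*
 * Queue an event for time-ordered delivery.  Queue entries are carved
 * out of MAX_SAMPLE_BUFFER-sized slabs and recycled through the
 * sample_cache list once they have been flushed.
 */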
static int perf_session_queue_event(struct perf_session *s, union perf_event *event,
				    struct perf_sample *sample, u64 file_offset)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *sc = &os->sample_cache;
	u64 timestamp = sample->time;
	struct sample_queue *new;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	if (!list_empty(sc)) {
		new = list_entry(sc->next, struct sample_queue, list);
		list_del(&new->list);
	} else if (os->sample_buffer) {
		new = os->sample_buffer + os->sample_buffer_idx;
		if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
			os->sample_buffer = NULL;
	} else {
		os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
		if (!os->sample_buffer)
			return -ENOMEM;
		list_add(&os->sample_buffer->list, &os->to_free);
		os->sample_buffer_idx = 2;
		new = os->sample_buffer + 1;
	}

	new->timestamp = timestamp;
	new->file_offset = file_offset;
	new->event = event;

	__queue_event(new, s);

	return 0;
}

static void callchain__printf(struct perf_sample *sample)
{
	unsigned int i;

	printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);

	for (i = 0; i < sample->callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, sample->callchain->ips[i]);
}

static void perf_session__print_tstamp(struct perf_session *session,
				       union perf_event *event,
				       struct perf_sample *sample)
{
	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !session->sample_id_all) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((session->sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (session->sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void dump_event(struct perf_session *session, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);

	if (sample)
		perf_session__print_tstamp(session, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_session *session, union perf_event *event,
			struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	if (session->sample_type & PERF_SAMPLE_CALLCHAIN)
		callchain__printf(sample);
}

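/*
 * Guest kernel samples are attributed to the guest machine looked up by
 * pid; everything else is accounted to the host machine.
 */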
static struct machine *
	perf_session__find_machine_for_cpumode(struct perf_session *session,
					       union perf_event *event)
{
	const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest)
		return perf_session__find_machine(session, event->ip.pid);

	return perf_session__find_host_machine(session);
}

static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset)
{
	struct perf_evsel *evsel;
	struct machine *machine;

	dump_event(session, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(session->evlist, sample->id);
	if (evsel != NULL && event->header.type != PERF_RECORD_SAMPLE) {
		/*
		 * XXX We're leaving PERF_RECORD_SAMPLE unaccounted here
		 * because the tools right now may apply filters, discarding
		 * some of the samples. For consistency, in the future we
		 * should have something like nr_filtered_samples and remove
		 * the sample->period from total_sample_period, etc, KISS for
		 * now tho.
		 *
		 * Also testing against NULL allows us to handle files without
		 * attr.sample_id_all and/or without PERF_SAMPLE_ID. In the
		 * future probably it'll be a good idea to restrict event
		 * processing via perf_session to files with both set.
		 */
		hists__inc_nr_events(&evsel->hists, event->header.type);
	}

	machine = perf_session__find_machine_for_cpumode(session, event);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		dump_sample(session, event, sample);
		if (evsel == NULL) {
			++session->hists.stats.nr_unknown_id;
			return -1;
		}
		return tool->sample(tool, event, sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			session->hists.stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_READ:
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	default:
		++session->hists.stats.nr_unknown_events;
		return -1;
	}
}

static int perf_session__preprocess_sample(struct perf_session *session,
					   union perf_event *event, struct perf_sample *sample)
{
	if (event->header.type != PERF_RECORD_SAMPLE ||
	    !(session->sample_type & PERF_SAMPLE_CALLCHAIN))
		return 0;

	if (!ip_callchain__valid(sample->callchain, event)) {
		pr_debug("call-chain problem with event, skipping it.\n");
		++session->hists.stats.nr_invalid_chains;
		session->hists.stats.total_invalid_chains += sample->period;
		return -EINVAL;
	}
	return 0;
}

static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
					    struct perf_tool *tool, u64 file_offset)
{
	int err;

	dump_event(session, event, file_offset, NULL);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(event, &session->evlist);
		if (err == 0)
			perf_session__update_sample_type(session);
		return err;
	case PERF_RECORD_HEADER_EVENT_TYPE:
		return tool->event_type(tool, event);
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(session->fd, file_offset, SEEK_SET);
		return tool->tracing_data(event, session);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(tool, event, session);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, session);
	default:
		return -EINVAL;
	}
}

static int perf_session__process_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	if (session->header.needs_swap &&
	    perf_event__swap_ops[event->header.type])
		perf_event__swap_ops[event->header.type](event);

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	hists__inc_nr_events(&session->hists, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, tool, file_offset);

	/*
	 * For all kernel events we get the sample data
	 */
	ret = perf_session__parse_sample(session, event, &sample);
	if (ret)
		return ret;

	/* Preprocess sample records - precheck callchains */
	if (perf_session__preprocess_sample(session, event, &sample))
		return 0;

	if (tool->ordered_samples) {
		ret = perf_session_queue_event(session, event, &sample,
					       file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session_deliver_event(session, event, &sample, tool,
					  file_offset);
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->host_machine, pid);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

static void perf_session__warn_about_errors(const struct perf_session *session,
					    const struct perf_tool *tool)
{
	if (tool->lost == perf_event__process_lost &&
	    session->hists.stats.nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    session->hists.stats.nr_events[0],
			    session->hists.stats.nr_events[PERF_RECORD_LOST]);
	}

	if (session->hists.stats.nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_unknown_events);
	}

	if (session->hists.stats.nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    session->hists.stats.nr_unknown_id);
	}

	if (session->hists.stats.nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_invalid_chains,
			    session->hists.stats.nr_events[PERF_RECORD_SAMPLE]);
	}
}

#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;

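/*
 * Pipe input cannot be mmapped, so events are read one record at a
 * time: the header first, then the rest of the payload, byte-swapping
 * the header when the producer had the opposite endianness.
 */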
static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_tool *tool)
{
	union perf_event event;
	uint32_t size;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_tool__fill_defaults(tool);

	head = 0;
more:
	err = readn(self->fd, &event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event.header);

	size = event.header.size;
	if (size == 0)
		size = 8;

	p = &event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(self->fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if (size == 0 ||
	    (skip = perf_session__process_event(self, &event, tool, head)) < 0) {
		dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
			    head, event.header.size, event.header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	perf_session__warn_about_errors(self, tool);
	perf_session_free_sample_buffers(self);
	return err;
}

static union perf_event *
fetch_mmaped_event(struct perf_session *session,
		   u64 head, size_t mmap_size, char *buf)
{
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size > mmap_size)
		return NULL;

	return event;
}

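/*
 * mmap the data section in mmap_window-sized slices and feed every
 * record to perf_session__process_event(), remapping at a page-aligned
 * offset whenever a record would cross the end of the current slice.
 */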
int __perf_session__process_events(struct perf_session *session,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_tool *tool)
{
	u64 head, page_offset, file_offset, file_pos, progress_next;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	size_t	page_size, mmap_size;
	char *buf, *mmaps[8];
	union perf_event *event;
	uint32_t size;

	perf_tool__fill_defaults(tool);

	page_size = sysconf(_SC_PAGESIZE);

	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;

	if (data_offset + data_size < file_size)
		file_size = data_offset + data_size;

	progress_next = file_size / 16;

	mmap_size = session->mmap_window;
	if (mmap_size > file_size)
		mmap_size = file_size;

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;

more:
	event = fetch_mmaped_event(session, head, mmap_size, buf);
	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	if (size == 0 ||
	    perf_session__process_event(session, event, tool, file_pos) < 0) {
		dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
			    file_offset + head, event->header.size,
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;
	file_pos += size;

	if (file_pos >= progress_next) {
		progress_next += file_size / 16;
		ui_progress__update(file_pos, file_size,
				    "Processing events...");
	}

	if (file_pos < file_size)
		goto more;

	err = 0;
	/* do the final flush for ordered samples */
	session->ordered_samples.next_flush = ULLONG_MAX;
	flush_sample_queue(session, tool);
out_err:
	perf_session__warn_about_errors(session, tool);
	perf_session_free_sample_buffers(session);
	return err;
}

int perf_session__process_events(struct perf_session *self,
				 struct perf_tool *tool)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, tool);
	else
		err = __perf_session__process_pipe_events(self, tool);

	return err;
}

bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}

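/*
 * Record the kallsyms reference symbol (name and address) in the kmap
 * of every map type; it is used later to work out how much the running
 * kernel has been relocated.
 */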
int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
				     const char *symbol_name, u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
	return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
	       __dsos__fprintf(&self->host_machine.user_dsos, fp) +
	       machines__fprintf_dsos(&self->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
					  bool with_hits)
{
	size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);
	return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = fprintf(fp, "Aggregated stats:\n");

	ret += hists__fprintf_nr_events(&session->hists, fp);

	list_for_each_entry(pos, &session->evlist->entries, node) {
		ret += fprintf(fp, "%s stats:\n", event_name(pos));
		ret += hists__fprintf_nr_events(&pos->hists, fp);
	}

	return ret;
}

size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->host_machine, fp);
}

void perf_session__remove_thread(struct perf_session *session,
				 struct thread *th)
{
	/*
	 * FIXME: This one makes no sense, we need to remove the thread from
	 * the machine it belongs to, perf_session can have many machines, so
	 * doing it always on ->host_machine is wrong.  Fix when auditing all
	 * the 'perf kvm' code.
	 */
	machine__remove_thread(&session->host_machine, th);
}

struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
					      unsigned int type)
{
	struct perf_evsel *pos;

	list_for_each_entry(pos, &session->evlist->entries, node) {
		if (pos->attr.type == type)
			return pos;
	}
	return NULL;
}

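/*
 * Resolve and print the ip of a sample and, when requested, its symbol
 * and dso names; if callchains are in use, the whole resolved chain is
 * printed, one line per entry.
 */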
void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
			  struct machine *machine, struct perf_evsel *evsel,
			  int print_sym, int print_dso)
{
	struct addr_location al;
	const char *symname, *dsoname;
	struct callchain_cursor *cursor = &evsel->hists.callchain_cursor;
	struct callchain_cursor_node *node;

	if (perf_event__preprocess_sample(event, machine, &al, sample,
					  NULL) < 0) {
		error("problem processing %d event, skipping it.\n",
			event->header.type);
		return;
	}

	if (symbol_conf.use_callchain && sample->callchain) {

		if (machine__resolve_callchain(machine, evsel, al.thread,
						sample->callchain, NULL) != 0) {
			if (verbose)
				error("Failed to resolve callchain. Skipping\n");
			return;
		}
		callchain_cursor_commit(cursor);

		while (1) {
			node = callchain_cursor_current(cursor);
			if (!node)
				break;

			printf("\t%16" PRIx64, node->ip);
			if (print_sym) {
				if (node->sym && node->sym->name)
					symname = node->sym->name;
				else
					symname = "";

				printf(" %s", symname);
			}
			if (print_dso) {
				if (node->map && node->map->dso && node->map->dso->name)
					dsoname = node->map->dso->name;
				else
					dsoname = "";

				printf(" (%s)", dsoname);
			}
			printf("\n");

			callchain_cursor_advance(cursor);
		}

	} else {
		printf("%16" PRIx64, sample->ip);
		if (print_sym) {
			if (al.sym && al.sym->name)
				symname = al.sym->name;
			else
				symname = "";

			printf(" %s", symname);
		}

		if (print_dso) {
			if (al.map && al.map->dso && al.map->dso->name)
				dsoname = al.map->dso->name;
			else
				dsoname = "";

			printf(" (%s)", dsoname);
		}
	}
}

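/*
 * Check that the recorded events carry PERF_SAMPLE_CPU, then turn the
 * cpu list string into a bitmap of the requested CPUs.
 */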
int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i;
	struct cpu_map *map;

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct perf_evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -c option to proceed.\n");
			return -1;
		}
	}

	map = cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= MAX_NR_CPUS) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			return -1;
		}

		set_bit(cpu, cpu_bitmap);
	}

	return 0;
}

void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	struct stat st;
	int ret;

	if (session == NULL || fp == NULL)
		return;

	ret = fstat(session->fd, &st);
	if (ret == -1)
		return;

	fprintf(fp, "# ========\n");
	fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}