header.c 74.7 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0
2
#include <errno.h>
3
#include <inttypes.h>
4
#include "util.h"
5
#include "string2.h"
6
#include <sys/param.h>
7
#include <sys/types.h>
8
#include <byteswap.h>
9 10 11
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
12
#include <linux/compiler.h>
13
#include <linux/list.h>
14
#include <linux/kernel.h>
15
#include <linux/bitops.h>
16
#include <linux/stringify.h>
17
#include <sys/stat.h>
18
#include <sys/utsname.h>
19

20
#include "evlist.h"
21
#include "evsel.h"
22
#include "header.h"
23
#include "memswap.h"
24 25
#include "../perf.h"
#include "trace-event.h"
26
#include "session.h"
27
#include "symbol.h"
28
#include "debug.h"
29
#include "cpumap.h"
30
#include "pmu.h"
31
#include "vdso.h"
32
#include "strbuf.h"
33
#include "build-id.h"
34
#include "data.h"
35 36
#include <api/fs/fs.h>
#include "asm/bug.h"
37
#include "tool.h"
38

39 40
#include "sane_ctype.h"

41 42 43 44 45 46 47 48 49 50 51 52
/*
 * magic2 = "PERFILE2"
 * must be a numerical value to let the endianness
 * determine the memory layout. That way we are able
 * to detect endianness when reading the perf.data file
 * back.
 *
 * we check for legacy (PERFFILE) format.
 */
static const char *__perf_magic1 = "PERFFILE";
static const u64 __perf_magic2    = 0x32454c4946524550ULL;
static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;
53

54
#define PERF_MAGIC	__perf_magic2
55

56 57
const char perf_version_string[] = PERF_VERSION;

58
struct perf_file_attr {
59
	struct perf_event_attr	attr;
60 61 62
	struct perf_file_section	ids;
};

63 64 65
struct feat_fd {
	struct perf_header	*ph;
	int			fd;
66
	void			*buf;	/* Either buf != NULL or fd >= 0 */
67 68
	ssize_t			offset;
	size_t			size;
69
	struct perf_evsel	*events;
70 71
};

72
void perf_header__set_feat(struct perf_header *header, int feat)
73
{
74
	set_bit(feat, header->adds_features);
75 76
}

77
void perf_header__clear_feat(struct perf_header *header, int feat)
78
{
79
	clear_bit(feat, header->adds_features);
80 81
}

82
bool perf_header__has_feat(const struct perf_header *header, int feat)
83
{
84
	return test_bit(feat, header->adds_features);
85 86
}

87
/* Write exactly @size bytes to the backing fd; 0 on success, -errno/-1 otherwise. */
static int __do_write_fd(struct feat_fd *ff, const void *buf, size_t size)
{
	ssize_t written = writen(ff->fd, buf, size);

	if (written == (ssize_t)size)
		return 0;

	return written < 0 ? (int)written : -1;
}

static int __do_write_buf(struct feat_fd *ff,  const void *buf, size_t size)
{
	/* struct perf_event_header::size is u16 */
	const size_t max_size = 0xffff - sizeof(struct perf_event_header);
	size_t new_size = ff->size;
	void *addr;

	if (size + ff->offset > max_size)
		return -E2BIG;

	while (size > (new_size - ff->offset))
		new_size <<= 1;
	new_size = min(max_size, new_size);

	if (ff->size < new_size) {
		addr = realloc(ff->buf, new_size);
		if (!addr)
			return -ENOMEM;
		ff->buf = addr;
		ff->size = new_size;
	}

	memcpy(ff->buf + ff->offset, buf, size);
	ff->offset += size;
120 121

	return 0;
122 123
}

124 125 126 127 128 129 130 131
/* Return: 0 if succeeded, -ERR if failed. Dispatches to buffer or fd backend. */
int do_write(struct feat_fd *ff, const void *buf, size_t size)
{
	if (ff->buf)
		return __do_write_buf(ff, buf, size);

	return __do_write_fd(ff, buf, size);
}

132
/* Return: 0 if succeded, -ERR if failed. */
133 134
int write_padded(struct feat_fd *ff, const void *bf,
		 size_t count, size_t count_aligned)
135 136
{
	static const char zero_buf[NAME_ALIGN];
137
	int err = do_write(ff, bf, count);
138 139

	if (!err)
140
		err = do_write(ff, zero_buf, count_aligned - count);
141 142 143 144

	return err;
}

145 146 147
#define string_size(str)						\
	(PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))

148
/* Return: 0 if succeded, -ERR if failed. */
149
static int do_write_string(struct feat_fd *ff, const char *str)
150 151 152 153 154
{
	u32 len, olen;
	int ret;

	olen = strlen(str) + 1;
155
	len = PERF_ALIGN(olen, NAME_ALIGN);
156 157

	/* write len, incl. \0 */
158
	ret = do_write(ff, &len, sizeof(len));
159 160 161
	if (ret < 0)
		return ret;

162
	return write_padded(ff, str, olen, len);
163 164
}

165
/* Read exactly @size bytes from the backing fd; 0 on success. */
static int __do_read_fd(struct feat_fd *ff, void *addr, ssize_t size)
{
	ssize_t nread = readn(ff->fd, addr, size);

	if (nread == size)
		return 0;

	return nread < 0 ? (int)nread : -1;
}

174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192
/* Copy @size bytes out of the in-memory buffer; -1 if not enough data remains. */
static int __do_read_buf(struct feat_fd *ff, void *addr, ssize_t size)
{
	if (size > (ssize_t)ff->size - ff->offset)
		return -1;

	memcpy(addr, ff->buf + ff->offset, size);
	ff->offset += size;

	return 0;
}

/* Read @size bytes from whichever backend this feat_fd uses. */
static int __do_read(struct feat_fd *ff, void *addr, ssize_t size)
{
	if (ff->buf)
		return __do_read_buf(ff, addr, size);

	return __do_read_fd(ff, addr, size);
}

193
/* Read a u32, byte-swapping it when the file's endianness differs from ours. */
static int do_read_u32(struct feat_fd *ff, u32 *addr)
{
	int ret = __do_read(ff, addr, sizeof(*addr));

	if (ret)
		return ret;

	if (ff->ph->needs_swap)
		*addr = bswap_32(*addr);

	return 0;
}

206
/* Read a u64, byte-swapping it when the file's endianness differs from ours. */
static int do_read_u64(struct feat_fd *ff, u64 *addr)
{
	int ret = __do_read(ff, addr, sizeof(*addr));

	if (ret)
		return ret;

	if (ff->ph->needs_swap)
		*addr = bswap_64(*addr);

	return 0;
}

219
static char *do_read_string(struct feat_fd *ff)
220 221 222 223
{
	u32 len;
	char *buf;

224
	if (do_read_u32(ff, &len))
225 226 227 228 229 230
		return NULL;

	buf = malloc(len);
	if (!buf)
		return NULL;

231
	if (!__do_read(ff, buf, len)) {
232 233 234 235 236 237 238 239 240 241 242 243
		/*
		 * strings are padded by zeroes
		 * thus the actual strlen of buf
		 * may be less than len
		 */
		return buf;
	}

	free(buf);
	return NULL;
}

244 245
static int write_tracing_data(struct feat_fd *ff,
			      struct perf_evlist *evlist)
246
{
247 248 249
	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

250
	return read_tracing_data(ff->fd, &evlist->entries);
251 252
}

253
static int write_build_id(struct feat_fd *ff,
254
			  struct perf_evlist *evlist __maybe_unused)
255 256 257 258
{
	struct perf_session *session;
	int err;

259
	session = container_of(ff->ph, struct perf_session, header);
260

261 262 263
	if (!perf_session__read_build_ids(session, true))
		return -1;

264 265 266
	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

267
	err = perf_session__write_buildid_table(session, ff);
268 269 270 271
	if (err < 0) {
		pr_debug("failed to write buildid table\n");
		return err;
	}
272
	perf_session__cache_build_ids(session);
273 274 275 276

	return 0;
}

277
static int write_hostname(struct feat_fd *ff,
278
			  struct perf_evlist *evlist __maybe_unused)
279 280 281 282 283 284 285 286
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

287
	return do_write_string(ff, uts.nodename);
288 289
}

290
static int write_osrelease(struct feat_fd *ff,
291
			   struct perf_evlist *evlist __maybe_unused)
292 293 294 295 296 297 298 299
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

300
	return do_write_string(ff, uts.release);
301 302
}

303
static int write_arch(struct feat_fd *ff,
304
		      struct perf_evlist *evlist __maybe_unused)
305 306 307 308 309 310 311 312
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

313
	return do_write_string(ff, uts.machine);
314 315
}

316
static int write_version(struct feat_fd *ff,
317
			 struct perf_evlist *evlist __maybe_unused)
318
{
319
	return do_write_string(ff, perf_version_string);
320 321
}

322
/*
 * Scan /proc/cpuinfo for the first line starting with @cpuinfo_proc
 * (e.g. "model name"), squeeze repeated whitespace out of its value
 * and write that as the cpu description string.
 */
static int __write_cpudesc(struct feat_fd *ff, const char *cpuinfo_proc)
{
	const char *search = cpuinfo_proc;
	char *line = NULL;
	size_t linecap = 0;
	char *val, *p;
	FILE *file;
	int ret = -1;

	if (!search)
		return -1;

	file = fopen("/proc/cpuinfo", "r");
	if (!file)
		return -1;

	/* find the first line carrying the wanted key */
	while (getline(&line, &linecap, file) > 0) {
		ret = strncmp(line, search, strlen(search));
		if (!ret)
			break;
	}

	if (ret) {
		ret = -1;
		goto done;
	}

	/* the value starts after ": " and ends at the newline */
	val = line;
	p = strchr(line, ':');
	if (p && *(p + 1) == ' ' && *(p + 2))
		val = p + 2;
	p = strchr(val, '\n');
	if (p)
		*p = '\0';

	/* squash extra space characters (branding string) */
	p = val;
	while (*p) {
		if (isspace(*p)) {
			char *r = p + 1;
			char *q = r;
			*p = ' ';
			while (*q && isspace(*q))
				q++;
			if (q != (p + 1))
				while ((*r++ = *q++));
		}
		p++;
	}

	ret = do_write_string(ff, val);
done:
	free(line);
	fclose(file);
	return ret;
}

379
static int write_cpudesc(struct feat_fd *ff,
380 381 382 383 384 385 386
		       struct perf_evlist *evlist __maybe_unused)
{
	const char *cpuinfo_procs[] = CPUINFO_PROC;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
		int ret;
387
		ret = __write_cpudesc(ff, cpuinfo_procs[i]);
388 389 390 391 392 393 394
		if (ret >= 0)
			return ret;
	}
	return -1;
}


395
static int write_nrcpus(struct feat_fd *ff,
396
			struct perf_evlist *evlist __maybe_unused)
397 398 399 400 401
{
	long nr;
	u32 nrc, nra;
	int ret;

402
	nrc = cpu__max_present_cpu();
403 404 405 406 407 408 409

	nr = sysconf(_SC_NPROCESSORS_ONLN);
	if (nr < 0)
		return -1;

	nra = (u32)(nr & UINT_MAX);

410
	ret = do_write(ff, &nrc, sizeof(nrc));
411 412 413
	if (ret < 0)
		return ret;

414
	return do_write(ff, &nra, sizeof(nra));
415 416
}

417
static int write_event_desc(struct feat_fd *ff,
418 419
			    struct perf_evlist *evlist)
{
420
	struct perf_evsel *evsel;
421
	u32 nre, nri, sz;
422 423
	int ret;

424
	nre = evlist->nr_entries;
425 426 427 428

	/*
	 * write number of events
	 */
429
	ret = do_write(ff, &nre, sizeof(nre));
430 431 432 433 434 435
	if (ret < 0)
		return ret;

	/*
	 * size of perf_event_attr struct
	 */
436
	sz = (u32)sizeof(evsel->attr);
437
	ret = do_write(ff, &sz, sizeof(sz));
438 439 440
	if (ret < 0)
		return ret;

441
	evlist__for_each_entry(evlist, evsel) {
442
		ret = do_write(ff, &evsel->attr, sz);
443 444 445 446 447 448 449 450 451
		if (ret < 0)
			return ret;
		/*
		 * write number of unique id per event
		 * there is one id per instance of an event
		 *
		 * copy into an nri to be independent of the
		 * type of ids,
		 */
452
		nri = evsel->ids;
453
		ret = do_write(ff, &nri, sizeof(nri));
454 455 456 457 458 459
		if (ret < 0)
			return ret;

		/*
		 * write event string as passed on cmdline
		 */
460
		ret = do_write_string(ff, perf_evsel__name(evsel));
461 462 463 464 465
		if (ret < 0)
			return ret;
		/*
		 * write unique ids for this event
		 */
466
		ret = do_write(ff, evsel->id, evsel->ids * sizeof(u64));
467 468 469 470 471 472
		if (ret < 0)
			return ret;
	}
	return 0;
}

473
static int write_cmdline(struct feat_fd *ff,
474
			 struct perf_evlist *evlist __maybe_unused)
475 476
{
	char buf[MAXPATHLEN];
477 478
	u32 n;
	int i, ret;
479

480 481
	/* actual path to perf binary */
	ret = readlink("/proc/self/exe", buf, sizeof(buf) - 1);
482 483 484 485 486 487 488
	if (ret <= 0)
		return -1;

	/* readlink() does not add null termination */
	buf[ret] = '\0';

	/* account for binary path */
489
	n = perf_env.nr_cmdline + 1;
490

491
	ret = do_write(ff, &n, sizeof(n));
492 493 494
	if (ret < 0)
		return ret;

495
	ret = do_write_string(ff, buf);
496 497 498
	if (ret < 0)
		return ret;

499
	for (i = 0 ; i < perf_env.nr_cmdline; i++) {
500
		ret = do_write_string(ff, perf_env.cmdline_argv[i]);
501 502 503 504 505 506 507 508 509 510 511 512
		if (ret < 0)
			return ret;
	}
	return 0;
}

#define CORE_SIB_FMT \
	"/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
#define THRD_SIB_FMT \
	"/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"

struct cpu_topo {
513
	u32 cpu_nr;
514 515 516 517 518 519 520 521 522 523 524 525
	u32 core_sib;
	u32 thread_sib;
	char **core_siblings;
	char **thread_siblings;
};

static int build_cpu_topo(struct cpu_topo *tp, int cpu)
{
	FILE *fp;
	char filename[MAXPATHLEN];
	char *buf = NULL, *p;
	size_t len = 0;
526
	ssize_t sret;
527 528 529 530 531 532
	u32 i = 0;
	int ret = -1;

	sprintf(filename, CORE_SIB_FMT, cpu);
	fp = fopen(filename, "r");
	if (!fp)
533
		goto try_threads;
534

535
	sret = getline(&buf, &len, fp);
536
	fclose(fp);
537 538
	if (sret <= 0)
		goto try_threads;
539 540 541 542 543 544 545 546 547 548 549 550 551 552 553

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->core_sib; i++) {
		if (!strcmp(buf, tp->core_siblings[i]))
			break;
	}
	if (i == tp->core_sib) {
		tp->core_siblings[i] = buf;
		tp->core_sib++;
		buf = NULL;
		len = 0;
	}
554
	ret = 0;
555

556
try_threads:
557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593
	sprintf(filename, THRD_SIB_FMT, cpu);
	fp = fopen(filename, "r");
	if (!fp)
		goto done;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->thread_sib; i++) {
		if (!strcmp(buf, tp->thread_siblings[i]))
			break;
	}
	if (i == tp->thread_sib) {
		tp->thread_siblings[i] = buf;
		tp->thread_sib++;
		buf = NULL;
	}
	ret = 0;
done:
	if(fp)
		fclose(fp);
	free(buf);
	return ret;
}

/* Release a topology built by build_cpu_topology(); NULL is a no-op. */
static void free_cpu_topo(struct cpu_topo *tp)
{
	u32 i;

	if (!tp)
		return;

	for (i = 0; i < tp->core_sib; i++)
		zfree(&tp->core_siblings[i]);

	for (i = 0; i < tp->thread_sib; i++)
		zfree(&tp->thread_siblings[i]);

	free(tp);
}

static struct cpu_topo *build_cpu_topology(void)
{
604
	struct cpu_topo *tp = NULL;
605 606
	void *addr;
	u32 nr, i;
607
	size_t sz;
608 609
	long ncpus;
	int ret = -1;
610
	struct cpu_map *map;
611

612
	ncpus = cpu__max_present_cpu();
613

614 615 616 617 618 619 620
	/* build online CPU map */
	map = cpu_map__new(NULL);
	if (map == NULL) {
		pr_debug("failed to get system cpumap\n");
		return NULL;
	}

621 622 623
	nr = (u32)(ncpus & UINT_MAX);

	sz = nr * sizeof(char *);
624
	addr = calloc(1, sizeof(*tp) + 2 * sz);
625
	if (!addr)
626
		goto out_free;
627 628

	tp = addr;
629
	tp->cpu_nr = nr;
630 631 632 633 634 635
	addr += sizeof(*tp);
	tp->core_siblings = addr;
	addr += sz;
	tp->thread_siblings = addr;

	for (i = 0; i < nr; i++) {
636 637 638
		if (!cpu_map__has(map, i))
			continue;

639 640 641 642
		ret = build_cpu_topo(tp, i);
		if (ret < 0)
			break;
	}
643 644 645

out_free:
	cpu_map__put(map);
646 647 648 649 650 651 652
	if (ret) {
		free_cpu_topo(tp);
		tp = NULL;
	}
	return tp;
}

653 654
/*
 * Write the CPU_TOPOLOGY feature: core sibling lists, thread sibling
 * lists, then per-cpu (core_id, socket_id) pairs.
 */
static int write_cpu_topology(struct feat_fd *ff,
			      struct perf_evlist *evlist __maybe_unused)
{
	struct cpu_topo *tp;
	u32 i;
	int ret, j;

	tp = build_cpu_topology();
	if (!tp)
		return -1;

	ret = do_write(ff, &tp->core_sib, sizeof(tp->core_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->core_sib; i++) {
		ret = do_write_string(ff, tp->core_siblings[i]);
		if (ret < 0)
			goto done;
	}

	ret = do_write(ff, &tp->thread_sib, sizeof(tp->thread_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->thread_sib; i++) {
		ret = do_write_string(ff, tp->thread_siblings[i]);
		if (ret < 0)
			goto done;	/* was 'break', which silently dropped the error */
	}

	ret = perf_env__read_cpu_topology_map(&perf_env);
	if (ret < 0)
		goto done;

	for (j = 0; j < perf_env.nr_cpus_avail; j++) {
		ret = do_write(ff, &perf_env.cpu[j].core_id,
			       sizeof(perf_env.cpu[j].core_id));
		if (ret < 0)
			goto done;	/* was 'return ret', leaking tp */
		ret = do_write(ff, &perf_env.cpu[j].socket_id,
			       sizeof(perf_env.cpu[j].socket_id));
		if (ret < 0)
			goto done;
	}
done:
	free_cpu_topo(tp);
	return ret;
}



704 705
/* Parse MemTotal from /proc/meminfo and write it (value in kB). */
static int write_total_mem(struct feat_fd *ff,
			   struct perf_evlist *evlist __maybe_unused)
{
	char *line = NULL;
	size_t len = 0;
	int ret = -1, n;
	uint64_t mem;
	FILE *fp;

	fp = fopen("/proc/meminfo", "r");
	if (!fp)
		return -1;

	while (getline(&line, &len, fp) > 0) {
		ret = strncmp(line, "MemTotal:", 9);
		if (!ret)
			break;
	}

	if (!ret) {
		n = sscanf(line, "%*s %"PRIu64, &mem);
		if (n == 1)
			ret = do_write(ff, &mem, sizeof(mem));
	} else {
		ret = -1;
	}

	free(line);
	fclose(fp);
	return ret;
}

733
static int write_topo_node(struct feat_fd *ff, int node)
734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751
{
	char str[MAXPATHLEN];
	char field[32];
	char *buf = NULL, *p;
	size_t len = 0;
	FILE *fp;
	u64 mem_total, mem_free, mem;
	int ret = -1;

	sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
	fp = fopen(str, "r");
	if (!fp)
		return -1;

	while (getline(&buf, &len, fp) > 0) {
		/* skip over invalid lines */
		if (!strchr(buf, ':'))
			continue;
752
		if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
753 754 755 756 757 758 759 760
			goto done;
		if (!strcmp(field, "MemTotal:"))
			mem_total = mem;
		if (!strcmp(field, "MemFree:"))
			mem_free = mem;
	}

	fclose(fp);
761
	fp = NULL;
762

763
	ret = do_write(ff, &mem_total, sizeof(u64));
764 765 766
	if (ret)
		goto done;

767
	ret = do_write(ff, &mem_free, sizeof(u64));
768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784
	if (ret)
		goto done;

	ret = -1;
	sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);

	fp = fopen(str, "r");
	if (!fp)
		goto done;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

785
	ret = do_write_string(ff, buf);
786 787
done:
	free(buf);
788 789
	if (fp)
		fclose(fp);
790 791 792
	return ret;
}

793 794
/*
 * Write the NUMA topology: online node count, then for each node its
 * id followed by its meminfo and cpu list (via write_topo_node()).
 */
static int write_numa_topology(struct feat_fd *ff,
			       struct perf_evlist *evlist __maybe_unused)
{
	struct cpu_map *node_map = NULL;
	char *buf = NULL;
	size_t len = 0;
	u32 nr, i, j;
	int ret = -1;
	char *c;
	FILE *fp;

	fp = fopen("/sys/devices/system/node/online", "r");
	if (!fp)
		return -1;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	c = strchr(buf, '\n');
	if (c)
		*c = '\0';

	node_map = cpu_map__new(buf);
	if (!node_map)
		goto done;

	nr = (u32)node_map->nr;

	ret = do_write(ff, &nr, sizeof(nr));
	if (ret < 0)
		goto done;

	for (i = 0; i < nr; i++) {
		j = (u32)node_map->map[i];
		ret = do_write(ff, &j, sizeof(j));
		if (ret < 0)
			break;

		ret = write_topo_node(ff, i);
		if (ret < 0)
			break;
	}
done:
	free(buf);
	fclose(fp);
	cpu_map__put(node_map);
	return ret;
}

842 843 844 845 846 847 848 849 850 851 852 853
/*
 * File format:
 *
 * struct pmu_mappings {
 *	u32	pmu_num;
 *	struct pmu_map {
 *		u32	type;
 *		char	name[];
 *	}[pmu_num];
 * };
 */

854
static int write_pmu_mappings(struct feat_fd *ff,
855
			      struct perf_evlist *evlist __maybe_unused)
856 857
{
	struct perf_pmu *pmu = NULL;
858
	u32 pmu_num = 0;
859
	int ret;
860

861 862 863 864 865 866 867 868 869 870
	/*
	 * Do a first pass to count number of pmu to avoid lseek so this
	 * works in pipe mode as well.
	 */
	while ((pmu = perf_pmu__scan(pmu))) {
		if (!pmu->name)
			continue;
		pmu_num++;
	}

871
	ret = do_write(ff, &pmu_num, sizeof(pmu_num));
872 873
	if (ret < 0)
		return ret;
874 875 876 877

	while ((pmu = perf_pmu__scan(pmu))) {
		if (!pmu->name)
			continue;
878

879
		ret = do_write(ff, &pmu->type, sizeof(pmu->type));
880 881 882
		if (ret < 0)
			return ret;

883
		ret = do_write_string(ff, pmu->name);
884 885
		if (ret < 0)
			return ret;
886 887 888 889 890
	}

	return 0;
}

891 892 893 894 895 896 897 898 899 900 901 902
/*
 * File format:
 *
 * struct group_descs {
 *	u32	nr_groups;
 *	struct group_desc {
 *		char	name[];
 *		u32	leader_idx;
 *		u32	nr_members;
 *	}[nr_groups];
 * };
 */
903
static int write_group_desc(struct feat_fd *ff,
904 905 906 907 908 909
			    struct perf_evlist *evlist)
{
	u32 nr_groups = evlist->nr_groups;
	struct perf_evsel *evsel;
	int ret;

910
	ret = do_write(ff, &nr_groups, sizeof(nr_groups));
911 912 913
	if (ret < 0)
		return ret;

914
	evlist__for_each_entry(evlist, evsel) {
915 916 917 918 919 920
		if (perf_evsel__is_group_leader(evsel) &&
		    evsel->nr_members > 1) {
			const char *name = evsel->group_name ?: "{anon_group}";
			u32 leader_idx = evsel->idx;
			u32 nr_members = evsel->nr_members;

921
			ret = do_write_string(ff, name);
922 923 924
			if (ret < 0)
				return ret;

925
			ret = do_write(ff, &leader_idx, sizeof(leader_idx));
926 927 928
			if (ret < 0)
				return ret;

929
			ret = do_write(ff, &nr_members, sizeof(nr_members));
930 931 932 933 934 935 936
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}

937 938
/*
 * default get_cpuid(): nothing gets recorded
939
 * actual implementation must be in arch/$(SRCARCH)/util/header.c
940
 */
941
int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
942 943 944 945
{
	return -1;
}

946
static int write_cpuid(struct feat_fd *ff,
947
		       struct perf_evlist *evlist __maybe_unused)
948 949 950 951 952 953 954 955 956 957
{
	char buffer[64];
	int ret;

	ret = get_cpuid(buffer, sizeof(buffer));
	if (!ret)
		goto write_it;

	return -1;
write_it:
958
	return do_write_string(ff, buffer);
959 960
}

961 962
/* BRANCH_STACK is a flag-only feature: nothing to serialize. */
static int write_branch_stack(struct feat_fd *ff __maybe_unused,
			      struct perf_evlist *evlist __maybe_unused)
{
	return 0;
}

967
static int write_auxtrace(struct feat_fd *ff,
968 969
			  struct perf_evlist *evlist __maybe_unused)
{
970 971 972
	struct perf_session *session;
	int err;

973 974 975
	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

976
	session = container_of(ff->ph, struct perf_session, header);
977

978
	err = auxtrace_index__write(ff->fd, &session->auxtrace_index);
979 980 981
	if (err < 0)
		pr_err("Failed to write auxtrace index\n");
	return err;
982 983
}

984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124
static int cpu_cache_level__sort(const void *a, const void *b)
{
	struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
	struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;

	return cache_a->level - cache_b->level;
}

/* Return true when two cache descriptions are identical in every field. */
static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
{
	return a->level == b->level &&
	       a->line_size == b->line_size &&
	       a->sets == b->sets &&
	       a->ways == b->ways &&
	       !strcmp(a->type, b->type) &&
	       !strcmp(a->size, b->size) &&
	       !strcmp(a->map, b->map);
}

/*
 * Fill @cache from sysfs cacheinfo for @cpu/@level.
 * Return: 0 on success, 1 if the cache index does not exist, -1 on
 * error.  On success, the type/size/map strings are allocated and
 * owned by @cache.
 */
static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
{
	char path[PATH_MAX], file[PATH_MAX];
	struct stat st;
	size_t len;

	scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
	scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);

	if (stat(file, &st))
		return 1;

	scnprintf(file, PATH_MAX, "%s/level", path);
	if (sysfs__read_int(file, (int *) &cache->level))
		return -1;

	scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
	if (sysfs__read_int(file, (int *) &cache->line_size))
		return -1;

	scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
	if (sysfs__read_int(file, (int *) &cache->sets))
		return -1;

	scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
	if (sysfs__read_int(file, (int *) &cache->ways))
		return -1;

	scnprintf(file, PATH_MAX, "%s/type", path);
	if (sysfs__read_str(file, &cache->type, &len))
		return -1;

	cache->type[len] = 0;
	cache->type = rtrim(cache->type);

	scnprintf(file, PATH_MAX, "%s/size", path);
	if (sysfs__read_str(file, &cache->size, &len)) {
		free(cache->type);
		return -1;
	}

	cache->size[len] = 0;
	cache->size = rtrim(cache->size);

	scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
	if (sysfs__read_str(file, &cache->map, &len)) {
		/*
		 * Nothing was stored in cache->map on failure; release
		 * the strings read so far.  (The old code freed the
		 * never-assigned ->map and leaked ->size.)
		 */
		free(cache->size);
		free(cache->type);
		return -1;
	}

	cache->map[len] = 0;
	cache->map = rtrim(cache->map);
	return 0;
}

/* Dump one cache description, e.g. "L1 Data 32K [0-1]". */
static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
{
	fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
}

/*
 * Probe every cache level of every configured CPU and collect the
 * distinct cache descriptions into @caches (at most @size entries);
 * the number found is returned through @cntp.
 */
static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
{
	u32 i, cnt = 0;
	long ncpus;
	u32 nr, cpu;
	u16 level;

	ncpus = sysconf(_SC_NPROCESSORS_CONF);
	if (ncpus < 0)
		return -1;

	nr = (u32)(ncpus & UINT_MAX);

	for (cpu = 0; cpu < nr; cpu++) {
		for (level = 0; level < 10; level++) {
			struct cpu_cache_level c;
			int err;

			err = cpu_cache_level__read(&c, cpu, level);
			if (err < 0)
				return err;

			/* no such cache index: done with this cpu */
			if (err == 1)
				break;

			/* keep only descriptions we have not seen yet */
			for (i = 0; i < cnt; i++) {
				if (cpu_cache_level__cmp(&c, &caches[i]))
					break;
			}

			if (i == cnt)
				caches[cnt++] = c;
			else
				cpu_cache_level__free(&c);

			if (WARN_ONCE(cnt == size, "way too many cpu caches.."))
				goto out;
		}
	}
 out:
	*cntp = cnt;
	return 0;
}

#define MAX_CACHES 2000

1125 1126
/*
 * Write the CACHE feature: a version word, the count of distinct
 * caches, then for each cache its numeric fields followed by its
 * string fields.
 */
static int write_cache(struct feat_fd *ff,
		       struct perf_evlist *evlist __maybe_unused)
{
	struct cpu_cache_level caches[MAX_CACHES];
	u32 cnt = 0, i, version = 1;
	int ret;

	ret = build_caches(caches, MAX_CACHES, &cnt);
	if (ret)
		goto out;

	/* stable on-file order: sorted by cache level */
	qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);

	ret = do_write(ff, &version, sizeof(u32));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &cnt, sizeof(u32));
	if (ret < 0)
		goto out;

	for (i = 0; i < cnt; i++) {
		struct cpu_cache_level *c = &caches[i];

		#define _W(v)					\
			ret = do_write(ff, &c->v, sizeof(u32));	\
			if (ret < 0)				\
				goto out;

		_W(level)
		_W(line_size)
		_W(sets)
		_W(ways)
		#undef _W

		#define _W(v)						\
			ret = do_write_string(ff, (const char *) c->v);	\
			if (ret < 0)					\
				goto out;

		_W(type)
		_W(size)
		_W(map)
		#undef _W
	}

out:
	for (i = 0; i < cnt; i++)
		cpu_cache_level__free(&caches[i]);
	return ret;
}

1177
/* STAT is a flag-only feature: presence of the bit carries the information. */
static int write_stat(struct feat_fd *ff __maybe_unused,
		      struct perf_evlist *evlist __maybe_unused)
{
	return 0;
}

1183
static void print_hostname(struct feat_fd *ff, FILE *fp)
1184
{
1185
	fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
1186 1187
}

1188
static void print_osrelease(struct feat_fd *ff, FILE *fp)
1189
{
1190
	fprintf(fp, "# os release : %s\n", ff->ph->env.os_release);
1191 1192
}

1193
static void print_arch(struct feat_fd *ff, FILE *fp)
1194
{
1195
	fprintf(fp, "# arch : %s\n", ff->ph->env.arch);
1196 1197
}

1198
static void print_cpudesc(struct feat_fd *ff, FILE *fp)
1199
{
1200
	fprintf(fp, "# cpudesc : %s\n", ff->ph->env.cpu_desc);
1201 1202
}

1203
static void print_nrcpus(struct feat_fd *ff, FILE *fp)
1204
{
1205 1206
	fprintf(fp, "# nrcpus online : %u\n", ff->ph->env.nr_cpus_online);
	fprintf(fp, "# nrcpus avail : %u\n", ff->ph->env.nr_cpus_avail);
1207 1208
}

1209
static void print_version(struct feat_fd *ff, FILE *fp)
1210
{
1211
	fprintf(fp, "# perf version : %s\n", ff->ph->env.version);
1212 1213
}

1214
static void print_cmdline(struct feat_fd *ff, FILE *fp)
1215
{
1216
	int nr, i;
1217

1218
	nr = ff->ph->env.nr_cmdline;
1219 1220 1221

	fprintf(fp, "# cmdline : ");

1222
	for (i = 0; i < nr; i++)
1223
		fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]);
1224 1225 1226
	fputc('\n', fp);
}

1227
static void print_cpu_topology(struct feat_fd *ff, FILE *fp)
1228
{
1229 1230
	struct perf_header *ph = ff->ph;
	int cpu_nr = ph->env.nr_cpus_avail;
1231
	int nr, i;
1232 1233
	char *str;

1234 1235
	nr = ph->env.nr_sibling_cores;
	str = ph->env.sibling_cores;
1236 1237 1238

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling cores   : %s\n", str);
1239
		str += strlen(str) + 1;
1240 1241
	}

1242 1243
	nr = ph->env.nr_sibling_threads;
	str = ph->env.sibling_threads;
1244 1245 1246

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling threads : %s\n", str);
1247
		str += strlen(str) + 1;
1248
	}
1249 1250 1251 1252 1253 1254 1255

	if (ph->env.cpu != NULL) {
		for (i = 0; i < cpu_nr; i++)
			fprintf(fp, "# CPU %d: Core ID %d, Socket ID %d\n", i,
				ph->env.cpu[i].core_id, ph->env.cpu[i].socket_id);
	} else
		fprintf(fp, "# Core ID and Socket ID information is not available\n");
1256 1257
}

1258
static void free_event_desc(struct perf_evsel *events)
1259
{
1260 1261 1262 1263 1264 1265
	struct perf_evsel *evsel;

	if (!events)
		return;

	for (evsel = events; evsel->attr.size; evsel++) {
1266 1267
		zfree(&evsel->name);
		zfree(&evsel->id);
1268 1269 1270 1271 1272
	}

	free(events);
}

1273
static struct perf_evsel *read_event_desc(struct feat_fd *ff)
1274 1275 1276
{
	struct perf_evsel *evsel, *events = NULL;
	u64 *id;
1277
	void *buf = NULL;
1278 1279
	u32 nre, sz, nr, i, j;
	size_t msz;
1280 1281

	/* number of events */
1282
	if (do_read_u32(ff, &nre))
1283 1284
		goto error;

1285
	if (do_read_u32(ff, &sz))
1286 1287
		goto error;

1288
	/* buffer to hold on file attr struct */
1289 1290 1291 1292
	buf = malloc(sz);
	if (!buf)
		goto error;

1293 1294 1295 1296 1297 1298
	/* the last event terminates with evsel->attr.size == 0: */
	events = calloc(nre + 1, sizeof(*events));
	if (!events)
		goto error;

	msz = sizeof(evsel->attr);
1299
	if (sz < msz)
1300 1301
		msz = sz;

1302 1303
	for (i = 0, evsel = events; i < nre; evsel++, i++) {
		evsel->idx = i;
1304

1305 1306 1307 1308
		/*
		 * must read entire on-file attr struct to
		 * sync up with layout.
		 */
1309
		if (__do_read(ff, buf, sz))
1310 1311
			goto error;

1312
		if (ff->ph->needs_swap)
1313 1314
			perf_event__attr_swap(buf);

1315
		memcpy(&evsel->attr, buf, msz);
1316

1317
		if (do_read_u32(ff, &nr))
1318 1319
			goto error;

1320
		if (ff->ph->needs_swap)
1321
			evsel->needs_swap = true;
1322

1323
		evsel->name = do_read_string(ff);
1324 1325
		if (!evsel->name)
			goto error;
1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336

		if (!nr)
			continue;

		id = calloc(nr, sizeof(*id));
		if (!id)
			goto error;
		evsel->ids = nr;
		evsel->id = id;

		for (j = 0 ; j < nr; j++) {
1337
			if (do_read_u64(ff, id))
1338 1339 1340 1341 1342
				goto error;
			id++;
		}
	}
out:
1343
	free(buf);
1344 1345
	return events;
error:
1346
	free_event_desc(events);
1347 1348 1349 1350
	events = NULL;
	goto out;
}

1351
static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
1352
				void *priv __maybe_unused)
1353 1354 1355 1356
{
	return fprintf(fp, ", %s = %s", name, val);
}

1357
static void print_event_desc(struct feat_fd *ff, FILE *fp)
1358
{
1359
	struct perf_evsel *evsel, *events;
1360 1361 1362
	u32 j;
	u64 *id;

1363 1364 1365 1366 1367
	if (ff->events)
		events = ff->events;
	else
		events = read_event_desc(ff);

1368 1369 1370 1371 1372 1373 1374
	if (!events) {
		fprintf(fp, "# event desc: not available or unable to read\n");
		return;
	}

	for (evsel = events; evsel->attr.size; evsel++) {
		fprintf(fp, "# event : name = %s, ", evsel->name);
1375

1376
		if (evsel->ids) {
1377
			fprintf(fp, ", id = {");
1378 1379 1380 1381 1382
			for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
				if (j)
					fputc(',', fp);
				fprintf(fp, " %"PRIu64, *id);
			}
1383
			fprintf(fp, " }");
1384
		}
1385

1386
		perf_event_attr__fprintf(fp, &evsel->attr, __desc_attr__fprintf, NULL);
1387

1388 1389
		fputc('\n', fp);
	}
1390 1391

	free_event_desc(events);
1392
	ff->events = NULL;
1393 1394
}

1395
static void print_total_mem(struct feat_fd *ff, FILE *fp)
1396
{
1397
	fprintf(fp, "# total memory : %llu kB\n", ff->ph->env.total_mem);
1398 1399
}

1400
static void print_numa_topology(struct feat_fd *ff, FILE *fp)
1401
{
1402 1403
	int i;
	struct numa_node *n;
1404

1405 1406
	for (i = 0; i < ff->ph->env.nr_numa_nodes; i++) {
		n = &ff->ph->env.numa_nodes[i];
1407 1408 1409

		fprintf(fp, "# node%u meminfo  : total = %"PRIu64" kB,"
			    " free = %"PRIu64" kB\n",
1410
			n->node, n->mem_total, n->mem_free);
1411

1412 1413
		fprintf(fp, "# node%u cpu list : ", n->node);
		cpu_map__fprintf(n->map, fp);
1414 1415 1416
	}
}

1417
static void print_cpuid(struct feat_fd *ff, FILE *fp)
1418
{
1419
	fprintf(fp, "# cpuid : %s\n", ff->ph->env.cpuid);
1420 1421
}

1422
static void print_branch_stack(struct feat_fd *ff __maybe_unused, FILE *fp)
1423 1424 1425 1426
{
	fprintf(fp, "# contains samples with branch stack\n");
}

1427
static void print_auxtrace(struct feat_fd *ff __maybe_unused, FILE *fp)
1428 1429 1430 1431
{
	fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
}

1432
static void print_stat(struct feat_fd *ff __maybe_unused, FILE *fp)
1433 1434 1435 1436
{
	fprintf(fp, "# contains stat data\n");
}

1437
static void print_cache(struct feat_fd *ff, FILE *fp __maybe_unused)
1438 1439 1440 1441
{
	int i;

	fprintf(fp, "# CPU cache info:\n");
1442
	for (i = 0; i < ff->ph->env.caches_cnt; i++) {
1443
		fprintf(fp, "#  ");
1444
		cpu_cache_level__fprintf(fp, &ff->ph->env.caches[i]);
1445 1446 1447
	}
}

1448
static void print_pmu_mappings(struct feat_fd *ff, FILE *fp)
1449 1450
{
	const char *delimiter = "# pmu mappings: ";
1451
	char *str, *tmp;
1452 1453 1454
	u32 pmu_num;
	u32 type;

1455
	pmu_num = ff->ph->env.nr_pmu_mappings;
1456 1457 1458 1459 1460
	if (!pmu_num) {
		fprintf(fp, "# pmu mappings: not available\n");
		return;
	}

1461
	str = ff->ph->env.pmu_mappings;
1462

1463
	while (pmu_num) {
1464 1465 1466 1467 1468 1469
		type = strtoul(str, &tmp, 0);
		if (*tmp != ':')
			goto error;

		str = tmp + 1;
		fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);
1470

1471
		delimiter = ", ";
1472 1473
		str += strlen(str) + 1;
		pmu_num--;
1474 1475 1476 1477 1478 1479 1480 1481 1482 1483
	}

	fprintf(fp, "\n");

	if (!pmu_num)
		return;
error:
	fprintf(fp, "# pmu mappings: unable to read\n");
}

1484
static void print_group_desc(struct feat_fd *ff, FILE *fp)
1485 1486 1487 1488 1489
{
	struct perf_session *session;
	struct perf_evsel *evsel;
	u32 nr = 0;

1490
	session = container_of(ff->ph, struct perf_session, header);
1491

1492
	evlist__for_each_entry(session->evlist, evsel) {
1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507
		if (perf_evsel__is_group_leader(evsel) &&
		    evsel->nr_members > 1) {
			fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
				perf_evsel__name(evsel));

			nr = evsel->nr_members - 1;
		} else if (nr) {
			fprintf(fp, ",%s", perf_evsel__name(evsel));

			if (--nr == 0)
				fprintf(fp, "}\n");
		}
	}
}

1508 1509 1510 1511 1512 1513
static int __event_process_build_id(struct build_id_event *bev,
				    char *filename,
				    struct perf_session *session)
{
	int err = -1;
	struct machine *machine;
1514
	u16 cpumode;
1515 1516 1517 1518 1519 1520 1521
	struct dso *dso;
	enum dso_kernel_type dso_type;

	machine = perf_session__findnew_machine(session, bev->pid);
	if (!machine)
		goto out;

1522
	cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
1523

1524
	switch (cpumode) {
1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538
	case PERF_RECORD_MISC_KERNEL:
		dso_type = DSO_TYPE_KERNEL;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		dso_type = DSO_TYPE_GUEST_KERNEL;
		break;
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_GUEST_USER:
		dso_type = DSO_TYPE_USER;
		break;
	default:
		goto out;
	}

1539
	dso = machine__findnew_dso(machine, filename);
1540
	if (dso != NULL) {
1541
		char sbuild_id[SBUILD_ID_SIZE];
1542 1543 1544

		dso__set_build_id(dso, &bev->build_id);

1545 1546 1547 1548
		if (dso_type != DSO_TYPE_USER) {
			struct kmod_path m = { .name = NULL, };

			if (!kmod_path__parse_name(&m, filename) && m.kmod)
1549
				dso__set_module_info(dso, &m, machine);
1550 1551 1552 1553 1554
			else
				dso->kernel = dso_type;

			free(m.name);
		}
1555 1556 1557 1558 1559

		build_id__sprintf(dso->build_id, sizeof(dso->build_id),
				  sbuild_id);
		pr_debug("build id event received for %s: %s\n",
			 dso->long_name, sbuild_id);
1560
		dso__put(dso);
1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573
	}

	err = 0;
out:
	return err;
}

/*
 * Read build-id records written in the pre-a1645ce1 on-disk layout,
 * which lacked the pid field.  The pid is reconstructed from
 * header.misc (host vs guest).  Returns 0 on success, -1 on short read.
 */
static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
						 int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct {
		struct perf_event_header   header;
		u8			   build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
		char			   filename[0];
	} old_bev;
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size;

	while (offset < limit) {
		ssize_t len;

		if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
			return -1;

		if (header->needs_swap)
			perf_event_header__bswap(&old_bev.header);

		len = old_bev.header.size - sizeof(old_bev);
		if (readn(input, filename, len) != len)
			return -1;

		bev.header = old_bev.header;

		/*
		 * As the pid is the missing value, we need to fill
		 * it properly. The header.misc value give us nice hint.
		 */
		bev.pid	= HOST_KERNEL_ID;
		if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
		    bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
			bev.pid	= DEFAULT_GUEST_KERNEL_ID;

		memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}

	return 0;
}

/*
 * Read the BUILD_ID feature section: a sequence of build_id_event
 * records each followed by a filename.  Falls back to the legacy
 * (pid-less) layout when the well-known kernel name shows up shifted
 * by sizeof(pid_t).  Returns 0 on success, -1 on read error.
 */
static int perf_header__read_build_ids(struct perf_header *header,
				       int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size, orig_offset = offset;
	int err = -1;

	while (offset < limit) {
		ssize_t len;

		if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
			goto out;

		if (header->needs_swap)
			perf_event_header__bswap(&bev.header);

		len = bev.header.size - sizeof(bev);
		if (readn(input, filename, len) != len)
			goto out;
		/*
		 * The a1645ce1 changeset:
		 *
		 * "perf: 'perf kvm' tool for monitoring guest performance from host"
		 *
		 * Added a field to struct build_id_event that broke the file
		 * format.
		 *
		 * Since the kernel build-id is the first entry, process the
		 * table using the old format if the well known
		 * '[kernel.kallsyms]' string for the kernel build-id has the
		 * first 4 characters chopped off (where the pid_t sits).
		 */
		if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
			if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
				return -1;
			return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
		}

		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}
	err = 0;
out:
	return err;
}

1663 1664
/* Macro for features that simply need to read and store a string. */
#define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \
1665
static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \
1666
{\
1667
	ff->ph->env.__feat_env = do_read_string(ff); \
1668
	return ff->ph->env.__feat_env ? 0 : -ENOMEM; \
1669 1670 1671 1672 1673 1674 1675 1676 1677
}

FEAT_PROCESS_STR_FUN(hostname, hostname);
FEAT_PROCESS_STR_FUN(osrelease, os_release);
FEAT_PROCESS_STR_FUN(version, version);
FEAT_PROCESS_STR_FUN(arch, arch);
FEAT_PROCESS_STR_FUN(cpudesc, cpu_desc);
FEAT_PROCESS_STR_FUN(cpuid, cpuid);

1678
static int process_tracing_data(struct feat_fd *ff, void *data)
1679
{
1680 1681
	ssize_t ret = trace_report(ff->fd, data, false);

1682
	return ret < 0 ? -1 : 0;
1683 1684
}

1685
static int process_build_id(struct feat_fd *ff, void *data __maybe_unused)
1686
{
1687
	if (perf_header__read_build_ids(ff->ph, ff->fd, ff->offset, ff->size))
1688 1689 1690 1691
		pr_debug("Failed to read buildids, continuing...\n");
	return 0;
}

1692
static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused)
1693
{
1694 1695
	int ret;
	u32 nr_cpus_avail, nr_cpus_online;
1696

1697
	ret = do_read_u32(ff, &nr_cpus_avail);
1698 1699
	if (ret)
		return ret;
1700

1701
	ret = do_read_u32(ff, &nr_cpus_online);
1702 1703
	if (ret)
		return ret;
1704 1705
	ff->ph->env.nr_cpus_avail = (int)nr_cpus_avail;
	ff->ph->env.nr_cpus_online = (int)nr_cpus_online;
1706 1707 1708
	return 0;
}

1709
static int process_total_mem(struct feat_fd *ff, void *data __maybe_unused)
1710
{
1711 1712
	u64 total_mem;
	int ret;
1713

1714
	ret = do_read_u64(ff, &total_mem);
1715
	if (ret)
1716
		return -1;
1717
	ff->ph->env.total_mem = (unsigned long long)total_mem;
1718 1719 1720
	return 0;
}

1721 1722 1723 1724 1725
static struct perf_evsel *
perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
{
	struct perf_evsel *evsel;

1726
	evlist__for_each_entry(evlist, evsel) {
1727 1728 1729 1730 1731 1732 1733 1734
		if (evsel->idx == idx)
			return evsel;
	}

	return NULL;
}

static void
1735 1736
perf_evlist__set_event_name(struct perf_evlist *evlist,
			    struct perf_evsel *event)
1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753
{
	struct perf_evsel *evsel;

	if (!event->name)
		return;

	evsel = perf_evlist__find_by_index(evlist, event->idx);
	if (!evsel)
		return;

	if (evsel->name)
		return;

	evsel->name = strdup(event->name);
}

static int
1754
process_event_desc(struct feat_fd *ff, void *data __maybe_unused)
1755
{
1756
	struct perf_session *session;
1757
	struct perf_evsel *evsel, *events = read_event_desc(ff);
1758 1759 1760 1761

	if (!events)
		return 0;

1762
	session = container_of(ff->ph, struct perf_session, header);
1763

1764
	if (session->data->is_pipe) {
1765 1766 1767 1768 1769
		/* Save events for reading later by print_event_desc,
		 * since they can't be read again in pipe mode. */
		ff->events = events;
	}

1770 1771 1772
	for (evsel = events; evsel->attr.size; evsel++)
		perf_evlist__set_event_name(session->evlist, evsel);

1773
	if (!session->data->is_pipe)
1774
		free_event_desc(events);
1775 1776 1777 1778

	return 0;
}

1779
static int process_cmdline(struct feat_fd *ff, void *data __maybe_unused)
1780
{
1781 1782
	char *str, *cmdline = NULL, **argv = NULL;
	u32 nr, i, len = 0;
1783

1784
	if (do_read_u32(ff, &nr))
1785 1786
		return -1;

1787
	ff->ph->env.nr_cmdline = nr;
1788

1789
	cmdline = zalloc(ff->size + nr + 1);
1790 1791 1792 1793 1794 1795
	if (!cmdline)
		return -1;

	argv = zalloc(sizeof(char *) * (nr + 1));
	if (!argv)
		goto error;
1796 1797

	for (i = 0; i < nr; i++) {
1798
		str = do_read_string(ff);
1799 1800 1801
		if (!str)
			goto error;

1802 1803 1804
		argv[i] = cmdline + len;
		memcpy(argv[i], str, strlen(str) + 1);
		len += strlen(str) + 1;
1805 1806
		free(str);
	}
1807 1808
	ff->ph->env.cmdline = cmdline;
	ff->ph->env.cmdline_argv = (const char **) argv;
1809 1810 1811
	return 0;

error:
1812 1813
	free(argv);
	free(cmdline);
1814 1815 1816
	return -1;
}

1817
static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
1818 1819 1820 1821
{
	u32 nr, i;
	char *str;
	struct strbuf sb;
1822
	int cpu_nr = ff->ph->env.nr_cpus_avail;
1823
	u64 size = 0;
1824
	struct perf_header *ph = ff->ph;
1825 1826 1827 1828

	ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
	if (!ph->env.cpu)
		return -1;
1829

1830
	if (do_read_u32(ff, &nr))
1831
		goto free_cpu;
1832 1833

	ph->env.nr_sibling_cores = nr;
1834
	size += sizeof(u32);
1835 1836
	if (strbuf_init(&sb, 128) < 0)
		goto free_cpu;
1837 1838

	for (i = 0; i < nr; i++) {
1839
		str = do_read_string(ff);
1840 1841 1842 1843
		if (!str)
			goto error;

		/* include a NULL character at the end */
1844 1845
		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
			goto error;
1846
		size += string_size(str);
1847 1848 1849 1850
		free(str);
	}
	ph->env.sibling_cores = strbuf_detach(&sb, NULL);

1851
	if (do_read_u32(ff, &nr))
1852 1853 1854
		return -1;

	ph->env.nr_sibling_threads = nr;
1855
	size += sizeof(u32);
1856 1857

	for (i = 0; i < nr; i++) {
1858
		str = do_read_string(ff);
1859 1860 1861 1862
		if (!str)
			goto error;

		/* include a NULL character at the end */
1863 1864
		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
			goto error;
1865
		size += string_size(str);
1866 1867 1868
		free(str);
	}
	ph->env.sibling_threads = strbuf_detach(&sb, NULL);
1869 1870 1871 1872 1873

	/*
	 * The header may be from old perf,
	 * which doesn't include core id and socket id information.
	 */
1874
	if (ff->size <= size) {
1875 1876 1877 1878 1879
		zfree(&ph->env.cpu);
		return 0;
	}

	for (i = 0; i < (u32)cpu_nr; i++) {
1880
		if (do_read_u32(ff, &nr))
1881 1882 1883 1884
			goto free_cpu;

		ph->env.cpu[i].core_id = nr;

1885
		if (do_read_u32(ff, &nr))
1886 1887
			goto free_cpu;

1888
		if (nr != (u32)-1 && nr > (u32)cpu_nr) {
1889 1890 1891 1892 1893 1894 1895 1896
			pr_debug("socket_id number is too big."
				 "You may need to upgrade the perf tool.\n");
			goto free_cpu;
		}

		ph->env.cpu[i].socket_id = nr;
	}

1897 1898 1899 1900
	return 0;

error:
	strbuf_release(&sb);
1901 1902
free_cpu:
	zfree(&ph->env.cpu);
1903 1904 1905
	return -1;
}

1906
static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused)
1907
{
1908 1909
	struct numa_node *nodes, *n;
	u32 nr, i;
1910 1911 1912
	char *str;

	/* nr nodes */
1913
	if (do_read_u32(ff, &nr))
1914
		return -1;
1915

1916 1917 1918
	nodes = zalloc(sizeof(*nodes) * nr);
	if (!nodes)
		return -ENOMEM;
1919 1920

	for (i = 0; i < nr; i++) {
1921 1922
		n = &nodes[i];

1923
		/* node number */
1924
		if (do_read_u32(ff, &n->node))
1925 1926
			goto error;

1927
		if (do_read_u64(ff, &n->mem_total))
1928 1929
			goto error;

1930
		if (do_read_u64(ff, &n->mem_free))
1931 1932
			goto error;

1933
		str = do_read_string(ff);
1934 1935 1936
		if (!str)
			goto error;

1937 1938
		n->map = cpu_map__new(str);
		if (!n->map)
1939
			goto error;
1940

1941 1942
		free(str);
	}
1943 1944
	ff->ph->env.nr_numa_nodes = nr;
	ff->ph->env.numa_nodes = nodes;
1945 1946 1947
	return 0;

error:
1948
	free(nodes);
1949 1950 1951
	return -1;
}

1952
static int process_pmu_mappings(struct feat_fd *ff, void *data __maybe_unused)
1953 1954 1955 1956 1957 1958
{
	char *name;
	u32 pmu_num;
	u32 type;
	struct strbuf sb;

1959
	if (do_read_u32(ff, &pmu_num))
1960 1961 1962 1963 1964 1965 1966
		return -1;

	if (!pmu_num) {
		pr_debug("pmu mappings not available\n");
		return 0;
	}

1967
	ff->ph->env.nr_pmu_mappings = pmu_num;
1968 1969
	if (strbuf_init(&sb, 128) < 0)
		return -1;
1970 1971

	while (pmu_num) {
1972
		if (do_read_u32(ff, &type))
1973 1974
			goto error;

1975
		name = do_read_string(ff);
1976 1977 1978
		if (!name)
			goto error;

1979 1980
		if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
			goto error;
1981
		/* include a NULL character at the end */
1982 1983
		if (strbuf_add(&sb, "", 1) < 0)
			goto error;
1984

1985
		if (!strcmp(name, "msr"))
1986
			ff->ph->env.msr_pmu_type = type;
1987

1988 1989 1990
		free(name);
		pmu_num--;
	}
1991
	ff->ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
1992 1993 1994 1995 1996 1997 1998
	return 0;

error:
	strbuf_release(&sb);
	return -1;
}

1999
static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused)
2000 2001 2002 2003 2004 2005 2006 2007 2008 2009 2010
{
	size_t ret = -1;
	u32 i, nr, nr_groups;
	struct perf_session *session;
	struct perf_evsel *evsel, *leader = NULL;
	struct group_desc {
		char *name;
		u32 leader_idx;
		u32 nr_members;
	} *desc;

2011
	if (do_read_u32(ff, &nr_groups))
2012 2013
		return -1;

2014
	ff->ph->env.nr_groups = nr_groups;
2015 2016 2017 2018 2019 2020 2021 2022 2023 2024
	if (!nr_groups) {
		pr_debug("group desc not available\n");
		return 0;
	}

	desc = calloc(nr_groups, sizeof(*desc));
	if (!desc)
		return -1;

	for (i = 0; i < nr_groups; i++) {
2025
		desc[i].name = do_read_string(ff);
2026 2027 2028
		if (!desc[i].name)
			goto out_free;

2029
		if (do_read_u32(ff, &desc[i].leader_idx))
2030 2031
			goto out_free;

2032
		if (do_read_u32(ff, &desc[i].nr_members))
2033 2034 2035 2036 2037 2038
			goto out_free;
	}

	/*
	 * Rebuild group relationship based on the group_desc
	 */
2039
	session = container_of(ff->ph, struct perf_session, header);
2040 2041 2042
	session->evlist->nr_groups = nr_groups;

	i = nr = 0;
2043
	evlist__for_each_entry(session->evlist, evsel) {
2044 2045 2046
		if (evsel->idx == (int) desc[i].leader_idx) {
			evsel->leader = evsel;
			/* {anon_group} is a dummy name */
N
Namhyung Kim 已提交
2047
			if (strcmp(desc[i].name, "{anon_group}")) {
2048
				evsel->group_name = desc[i].name;
N
Namhyung Kim 已提交
2049 2050
				desc[i].name = NULL;
			}
2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073 2074 2075
			evsel->nr_members = desc[i].nr_members;

			if (i >= nr_groups || nr > 0) {
				pr_debug("invalid group desc\n");
				goto out_free;
			}

			leader = evsel;
			nr = evsel->nr_members - 1;
			i++;
		} else if (nr) {
			/* This is a group member */
			evsel->leader = leader;

			nr--;
		}
	}

	if (i != nr_groups || nr != 0) {
		pr_debug("invalid group desc\n");
		goto out_free;
	}

	ret = 0;
out_free:
2076
	for (i = 0; i < nr_groups; i++)
2077
		zfree(&desc[i].name);
2078 2079 2080 2081 2082
	free(desc);

	return ret;
}

2083
static int process_auxtrace(struct feat_fd *ff, void *data __maybe_unused)
2084 2085 2086 2087
{
	struct perf_session *session;
	int err;

2088
	session = container_of(ff->ph, struct perf_session, header);
2089

2090
	err = auxtrace_index__process(ff->fd, ff->size, session,
2091
				      ff->ph->needs_swap);
2092 2093 2094 2095 2096
	if (err < 0)
		pr_err("Failed to process auxtrace index\n");
	return err;
}

2097
static int process_cache(struct feat_fd *ff, void *data __maybe_unused)
2098 2099 2100 2101
{
	struct cpu_cache_level *caches;
	u32 cnt, i, version;

2102
	if (do_read_u32(ff, &version))
2103 2104 2105 2106 2107
		return -1;

	if (version != 1)
		return -1;

2108
	if (do_read_u32(ff, &cnt))
2109 2110 2111 2112 2113 2114 2115 2116 2117 2118
		return -1;

	caches = zalloc(sizeof(*caches) * cnt);
	if (!caches)
		return -1;

	for (i = 0; i < cnt; i++) {
		struct cpu_cache_level c;

		#define _R(v)						\
2119
			if (do_read_u32(ff, &c.v))\
2120 2121 2122 2123 2124 2125 2126 2127
				goto out_free_caches;			\

		_R(level)
		_R(line_size)
		_R(sets)
		_R(ways)
		#undef _R

2128
		#define _R(v)					\
2129
			c.v = do_read_string(ff);		\
2130
			if (!c.v)				\
2131 2132 2133 2134 2135 2136 2137 2138 2139 2140
				goto out_free_caches;

		_R(type)
		_R(size)
		_R(map)
		#undef _R

		caches[i] = c;
	}

2141 2142
	ff->ph->env.caches = caches;
	ff->ph->env.caches_cnt = cnt;
2143 2144 2145 2146 2147 2148
	return 0;
out_free_caches:
	free(caches);
	return -1;
}

2149
struct feature_ops {
2150
	int (*write)(struct feat_fd *ff, struct perf_evlist *evlist);
2151
	void (*print)(struct feat_fd *ff, FILE *fp);
2152
	int (*process)(struct feat_fd *ff, void *data);
2153 2154
	const char *name;
	bool full_only;
2155
	bool synthesize;
2156 2157
};

2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175
#define FEAT_OPR(n, func, __full_only) \
	[HEADER_##n] = {					\
		.name	    = __stringify(n),			\
		.write	    = write_##func,			\
		.print	    = print_##func,			\
		.full_only  = __full_only,			\
		.process    = process_##func,			\
		.synthesize = true				\
	}

#define FEAT_OPN(n, func, __full_only) \
	[HEADER_##n] = {					\
		.name	    = __stringify(n),			\
		.write	    = write_##func,			\
		.print	    = print_##func,			\
		.full_only  = __full_only,			\
		.process    = process_##func			\
	}
2176 2177

/* feature_ops not implemented: */
2178 2179
#define print_tracing_data	NULL
#define print_build_id		NULL
2180

2181 2182 2183 2184
#define process_branch_stack	NULL
#define process_stat		NULL


2185
static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
2186 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202 2203 2204 2205
	FEAT_OPN(TRACING_DATA,	tracing_data,	false),
	FEAT_OPN(BUILD_ID,	build_id,	false),
	FEAT_OPR(HOSTNAME,	hostname,	false),
	FEAT_OPR(OSRELEASE,	osrelease,	false),
	FEAT_OPR(VERSION,	version,	false),
	FEAT_OPR(ARCH,		arch,		false),
	FEAT_OPR(NRCPUS,	nrcpus,		false),
	FEAT_OPR(CPUDESC,	cpudesc,	false),
	FEAT_OPR(CPUID,		cpuid,		false),
	FEAT_OPR(TOTAL_MEM,	total_mem,	false),
	FEAT_OPR(EVENT_DESC,	event_desc,	false),
	FEAT_OPR(CMDLINE,	cmdline,	false),
	FEAT_OPR(CPU_TOPOLOGY,	cpu_topology,	true),
	FEAT_OPR(NUMA_TOPOLOGY,	numa_topology,	true),
	FEAT_OPN(BRANCH_STACK,	branch_stack,	false),
	FEAT_OPR(PMU_MAPPINGS,	pmu_mappings,	false),
	FEAT_OPN(GROUP_DESC,	group_desc,	false),
	FEAT_OPN(AUXTRACE,	auxtrace,	false),
	FEAT_OPN(STAT,		stat,		false),
	FEAT_OPN(CACHE,		cache,		true),
2206 2207 2208 2209 2210 2211 2212 2213 2214 2215 2216 2217
};

/* Cookie passed to perf_file_section__fprintf_info() via process_sections. */
struct header_print_data {
	FILE *fp;
	bool full; /* extended list of headers */
};

static int perf_file_section__fprintf_info(struct perf_file_section *section,
					   struct perf_header *ph,
					   int feat, int fd, void *data)
{
	struct header_print_data *hd = data;
2218
	struct feat_fd ff;
2219 2220 2221 2222 2223 2224

	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
				"%d, continuing...\n", section->offset, feat);
		return 0;
	}
2225
	if (feat >= HEADER_LAST_FEATURE) {
2226
		pr_warning("unknown feature %d\n", feat);
2227
		return 0;
2228 2229 2230 2231
	}
	if (!feat_ops[feat].print)
		return 0;

2232 2233 2234 2235 2236
	ff = (struct  feat_fd) {
		.fd = fd,
		.ph = ph,
	};

2237
	if (!feat_ops[feat].full_only || hd->full)
2238
		feat_ops[feat].print(&ff, hd->fp);
2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249
	else
		fprintf(hd->fp, "# %s info available, use -I to display\n",
			feat_ops[feat].name);

	return 0;
}

int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
{
	struct header_print_data hd;
	struct perf_header *header = &session->header;
2250
	int fd = perf_data__fd(session->data);
2251
	struct stat st;
J
Jiri Olsa 已提交
2252
	int ret, bit;
2253

2254 2255 2256
	hd.fp = fp;
	hd.full = full;

2257 2258 2259 2260 2261 2262
	ret = fstat(fd, &st);
	if (ret == -1)
		return -1;

	fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));

2263 2264
	perf_header__process_sections(header, fd, &hd,
				      perf_file_section__fprintf_info);
J
Jiri Olsa 已提交
2265

2266
	if (session->data->is_pipe)
2267 2268
		return 0;

J
Jiri Olsa 已提交
2269 2270 2271 2272 2273 2274 2275
	fprintf(fp, "# missing features: ");
	for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
		if (bit)
			fprintf(fp, "%s ", feat_ops[bit].name);
	}

	fprintf(fp, "\n");
2276 2277 2278
	return 0;
}

2279
static int do_write_feat(struct feat_fd *ff, int type,
2280 2281 2282 2283 2284 2285
			 struct perf_file_section **p,
			 struct perf_evlist *evlist)
{
	int err;
	int ret = 0;

2286
	if (perf_header__has_feat(ff->ph, type)) {
2287 2288
		if (!feat_ops[type].write)
			return -1;
2289

2290 2291 2292
		if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
			return -1;

2293
		(*p)->offset = lseek(ff->fd, 0, SEEK_CUR);
2294

2295
		err = feat_ops[type].write(ff, evlist);
2296
		if (err < 0) {
2297
			pr_debug("failed to write feature %s\n", feat_ops[type].name);
2298 2299

			/* undo anything written */
2300
			lseek(ff->fd, (*p)->offset, SEEK_SET);
2301 2302 2303

			return -1;
		}
2304
		(*p)->size = lseek(ff->fd, 0, SEEK_CUR) - (*p)->offset;
2305 2306 2307 2308 2309
		(*p)++;
	}
	return ret;
}

2310
static int perf_header__adds_write(struct perf_header *header,
2311
				   struct perf_evlist *evlist, int fd)
2312
{
2313
	int nr_sections;
2314
	struct feat_fd ff;
2315
	struct perf_file_section *feat_sec, *p;
2316 2317
	int sec_size;
	u64 sec_start;
2318
	int feat;
2319
	int err;
2320

2321 2322 2323 2324 2325
	ff = (struct feat_fd){
		.fd  = fd,
		.ph = header,
	};

2326
	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
2327
	if (!nr_sections)
2328
		return 0;
2329

2330
	feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
2331 2332
	if (feat_sec == NULL)
		return -ENOMEM;
2333 2334 2335

	sec_size = sizeof(*feat_sec) * nr_sections;

2336
	sec_start = header->feat_offset;
2337
	lseek(fd, sec_start + sec_size, SEEK_SET);
2338

2339
	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
2340
		if (do_write_feat(&ff, feat, &p, evlist))
2341 2342
			perf_header__clear_feat(header, feat);
	}
2343

2344
	lseek(fd, sec_start, SEEK_SET);
2345 2346 2347 2348
	/*
	 * may write more than needed due to dropped feature, but
	 * this is okay, reader will skip the mising entries
	 */
2349
	err = do_write(&ff, feat_sec, sec_size);
2350 2351
	if (err < 0)
		pr_debug("failed to write feature section\n");
2352
	free(feat_sec);
2353
	return err;
2354
}
2355

2356 2357 2358
int perf_header__write_pipe(int fd)
{
	struct perf_pipe_file_header f_header;
2359
	struct feat_fd ff;
2360 2361
	int err;

2362 2363
	ff = (struct feat_fd){ .fd = fd };

2364 2365 2366 2367 2368
	f_header = (struct perf_pipe_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),
	};

2369
	err = do_write(&ff, &f_header, sizeof(f_header));
2370 2371 2372 2373 2374 2375 2376 2377
	if (err < 0) {
		pr_debug("failed to write perf pipe header\n");
		return err;
	}

	return 0;
}

2378 2379 2380
int perf_session__write_header(struct perf_session *session,
			       struct perf_evlist *evlist,
			       int fd, bool at_exit)
2381 2382 2383
{
	struct perf_file_header f_header;
	struct perf_file_attr   f_attr;
2384
	struct perf_header *header = &session->header;
2385
	struct perf_evsel *evsel;
2386
	struct feat_fd ff;
2387
	u64 attr_offset;
2388
	int err;
2389

2390
	ff = (struct feat_fd){ .fd = fd};
2391 2392
	lseek(fd, sizeof(f_header), SEEK_SET);

2393
	evlist__for_each_entry(session->evlist, evsel) {
2394
		evsel->id_offset = lseek(fd, 0, SEEK_CUR);
2395
		err = do_write(&ff, evsel->id, evsel->ids * sizeof(u64));
2396 2397 2398 2399
		if (err < 0) {
			pr_debug("failed to write perf header\n");
			return err;
		}
2400 2401
	}

2402
	attr_offset = lseek(ff.fd, 0, SEEK_CUR);
2403

2404
	evlist__for_each_entry(evlist, evsel) {
2405
		f_attr = (struct perf_file_attr){
2406
			.attr = evsel->attr,
2407
			.ids  = {
2408 2409
				.offset = evsel->id_offset,
				.size   = evsel->ids * sizeof(u64),
2410 2411
			}
		};
2412
		err = do_write(&ff, &f_attr, sizeof(f_attr));
2413 2414 2415 2416
		if (err < 0) {
			pr_debug("failed to write perf header attribute\n");
			return err;
		}
2417 2418
	}

2419 2420
	if (!header->data_offset)
		header->data_offset = lseek(fd, 0, SEEK_CUR);
2421
	header->feat_offset = header->data_offset + header->data_size;
2422

2423
	if (at_exit) {
2424
		err = perf_header__adds_write(header, evlist, fd);
2425 2426 2427
		if (err < 0)
			return err;
	}
2428

2429 2430 2431 2432 2433
	f_header = (struct perf_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),
		.attr_size = sizeof(f_attr),
		.attrs = {
2434
			.offset = attr_offset,
2435
			.size   = evlist->nr_entries * sizeof(f_attr),
2436 2437
		},
		.data = {
2438 2439
			.offset = header->data_offset,
			.size	= header->data_size,
2440
		},
2441
		/* event_types is ignored, store zeros */
2442 2443
	};

2444
	memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
2445

2446
	lseek(fd, 0, SEEK_SET);
2447
	err = do_write(&ff, &f_header, sizeof(f_header));
2448 2449 2450 2451
	if (err < 0) {
		pr_debug("failed to write perf header\n");
		return err;
	}
2452
	lseek(fd, header->data_offset + header->data_size, SEEK_SET);
2453

2454
	return 0;
2455 2456
}

2457
static int perf_header__getbuffer64(struct perf_header *header,
2458 2459
				    int fd, void *buf, size_t size)
{
2460
	if (readn(fd, buf, size) <= 0)
2461 2462
		return -1;

2463
	if (header->needs_swap)
2464 2465 2466 2467 2468
		mem_bswap_64(buf, size);

	return 0;
}

2469
int perf_header__process_sections(struct perf_header *header, int fd,
2470
				  void *data,
2471
				  int (*process)(struct perf_file_section *section,
2472 2473
						 struct perf_header *ph,
						 int feat, int fd, void *data))
2474
{
2475
	struct perf_file_section *feat_sec, *sec;
2476 2477
	int nr_sections;
	int sec_size;
2478 2479
	int feat;
	int err;
2480

2481
	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
2482
	if (!nr_sections)
2483
		return 0;
2484

2485
	feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
2486
	if (!feat_sec)
2487
		return -1;
2488 2489 2490

	sec_size = sizeof(*feat_sec) * nr_sections;

2491
	lseek(fd, header->feat_offset, SEEK_SET);
2492

2493 2494
	err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
	if (err < 0)
2495
		goto out_free;
2496

2497 2498 2499 2500
	for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
		err = process(sec++, header, feat, fd, data);
		if (err < 0)
			goto out_free;
2501
	}
2502
	err = 0;
2503
out_free:
2504 2505
	free(feat_sec);
	return err;
2506
}
2507

2508 2509 2510
static const int attr_file_abi_sizes[] = {
	[0] = PERF_ATTR_SIZE_VER0,
	[1] = PERF_ATTR_SIZE_VER1,
2511
	[2] = PERF_ATTR_SIZE_VER2,
2512
	[3] = PERF_ATTR_SIZE_VER3,
2513
	[4] = PERF_ATTR_SIZE_VER4,
2514 2515 2516 2517 2518 2519 2520 2521 2522 2523
	0,
};

/*
 * In the legacy file format, the magic number is not used to encode endianness.
 * hdr_sz was used to encode endianness. But given that hdr_sz can vary based
 * on ABI revisions, we need to try all combinations for all endianness to
 * detect the endianness.
 */
static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
2524
{
2525 2526
	uint64_t ref_size, attr_size;
	int i;
2527

2528 2529 2530 2531 2532 2533 2534
	for (i = 0 ; attr_file_abi_sizes[i]; i++) {
		ref_size = attr_file_abi_sizes[i]
			 + sizeof(struct perf_file_section);
		if (hdr_sz != ref_size) {
			attr_size = bswap_64(hdr_sz);
			if (attr_size != ref_size)
				continue;
2535

2536 2537 2538 2539 2540 2541 2542 2543 2544 2545
			ph->needs_swap = true;
		}
		pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
			 i,
			 ph->needs_swap);
		return 0;
	}
	/* could not determine endianness */
	return -1;
}
2546

2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570
#define PERF_PIPE_HDR_VER0	16

static const size_t attr_pipe_abi_sizes[] = {
	[0] = PERF_PIPE_HDR_VER0,
	0,
};

/*
 * In the legacy pipe format, there is an implicit assumption that endiannesss
 * between host recording the samples, and host parsing the samples is the
 * same. This is not always the case given that the pipe output may always be
 * redirected into a file and analyzed on a different machine with possibly a
 * different endianness and perf_event ABI revsions in the perf tool itself.
 */
static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
{
	u64 attr_size;
	int i;

	for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
		if (hdr_sz != attr_pipe_abi_sizes[i]) {
			attr_size = bswap_64(hdr_sz);
			if (attr_size != hdr_sz)
				continue;
2571 2572 2573

			ph->needs_swap = true;
		}
2574
		pr_debug("Pipe ABI%d perf.data file detected\n", i);
2575 2576
		return 0;
	}
2577 2578 2579
	return -1;
}

F
Feng Tang 已提交
2580 2581 2582 2583 2584 2585 2586 2587 2588 2589
bool is_perf_magic(u64 magic)
{
	if (!memcmp(&magic, __perf_magic1, sizeof(magic))
		|| magic == __perf_magic2
		|| magic == __perf_magic2_sw)
		return true;

	return false;
}

2590 2591 2592 2593 2594 2595 2596 2597
static int check_magic_endian(u64 magic, uint64_t hdr_sz,
			      bool is_pipe, struct perf_header *ph)
{
	int ret;

	/* check for legacy format */
	ret = memcmp(&magic, __perf_magic1, sizeof(magic));
	if (ret == 0) {
2598
		ph->version = PERF_HEADER_VERSION_1;
2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609
		pr_debug("legacy perf.data format\n");
		if (is_pipe)
			return try_all_pipe_abis(hdr_sz, ph);

		return try_all_file_abis(hdr_sz, ph);
	}
	/*
	 * the new magic number serves two purposes:
	 * - unique number to identify actual perf.data files
	 * - encode endianness of file
	 */
2610
	ph->version = PERF_HEADER_VERSION_2;
2611

2612 2613
	/* check magic number with one endianness */
	if (magic == __perf_magic2)
2614 2615
		return 0;

2616 2617
	/* check magic number with opposite endianness */
	if (magic != __perf_magic2_sw)
2618 2619 2620 2621 2622 2623 2624
		return -1;

	ph->needs_swap = true;

	return 0;
}

2625
int perf_file_header__read(struct perf_file_header *header,
2626 2627
			   struct perf_header *ph, int fd)
{
2628
	ssize_t ret;
2629

2630 2631
	lseek(fd, 0, SEEK_SET);

2632 2633
	ret = readn(fd, header, sizeof(*header));
	if (ret <= 0)
2634 2635
		return -1;

2636 2637 2638
	if (check_magic_endian(header->magic,
			       header->attr_size, false, ph) < 0) {
		pr_debug("magic/endian check failed\n");
2639
		return -1;
2640
	}
2641

2642
	if (ph->needs_swap) {
2643
		mem_bswap_64(header, offsetof(struct perf_file_header,
2644
			     adds_features));
2645 2646
	}

2647
	if (header->size != sizeof(*header)) {
2648
		/* Support the previous format */
2649 2650
		if (header->size == offsetof(typeof(*header), adds_features))
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
2651 2652
		else
			return -1;
2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668
	} else if (ph->needs_swap) {
		/*
		 * feature bitmap is declared as an array of unsigned longs --
		 * not good since its size can differ between the host that
		 * generated the data file and the host analyzing the file.
		 *
		 * We need to handle endianness, but we don't know the size of
		 * the unsigned long where the file was generated. Take a best
		 * guess at determining it: try 64-bit swap first (ie., file
		 * created on a 64-bit host), and check if the hostname feature
		 * bit is set (this feature bit is forced on as of fbe96f2).
		 * If the bit is not, undo the 64-bit swap and try a 32-bit
		 * swap. If the hostname bit is still not set (e.g., older data
		 * file), punt and fallback to the original behavior --
		 * clearing all feature bits and setting buildid.
		 */
2669 2670
		mem_bswap_64(&header->adds_features,
			    BITS_TO_U64(HEADER_FEAT_BITS));
2671 2672

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
2673 2674 2675 2676 2677 2678 2679
			/* unswap as u64 */
			mem_bswap_64(&header->adds_features,
				    BITS_TO_U64(HEADER_FEAT_BITS));

			/* unswap as u32 */
			mem_bswap_32(&header->adds_features,
				    BITS_TO_U32(HEADER_FEAT_BITS));
2680 2681 2682 2683 2684 2685
		}

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
			set_bit(HEADER_BUILD_ID, header->adds_features);
		}
2686
	}
2687

2688
	memcpy(&ph->adds_features, &header->adds_features,
2689
	       sizeof(ph->adds_features));
2690

2691 2692
	ph->data_offset  = header->data.offset;
	ph->data_size	 = header->data.size;
2693
	ph->feat_offset  = header->data.offset + header->data.size;
2694 2695 2696
	return 0;
}

2697
static int perf_file_section__process(struct perf_file_section *section,
2698
				      struct perf_header *ph,
2699
				      int feat, int fd, void *data)
2700
{
2701
	struct feat_fd fdd = {
2702 2703
		.fd	= fd,
		.ph	= ph,
2704 2705
		.size	= section->size,
		.offset	= section->offset,
2706 2707
	};

2708
	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2709
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2710
			  "%d, continuing...\n", section->offset, feat);
2711 2712 2713
		return 0;
	}

2714 2715 2716 2717 2718
	if (feat >= HEADER_LAST_FEATURE) {
		pr_debug("unknown feature %d, continuing...\n", feat);
		return 0;
	}

2719 2720
	if (!feat_ops[feat].process)
		return 0;
2721

2722
	return feat_ops[feat].process(&fdd, data);
2723
}
2724

2725
static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
T
Tom Zanussi 已提交
2726 2727
				       struct perf_header *ph, int fd,
				       bool repipe)
2728
{
2729 2730 2731 2732
	struct feat_fd ff = {
		.fd = STDOUT_FILENO,
		.ph = ph,
	};
2733
	ssize_t ret;
2734 2735 2736 2737 2738

	ret = readn(fd, header, sizeof(*header));
	if (ret <= 0)
		return -1;

2739 2740
	if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
		pr_debug("endian/magic failed\n");
2741
		return -1;
2742 2743 2744 2745
	}

	if (ph->needs_swap)
		header->size = bswap_64(header->size);
2746

2747
	if (repipe && do_write(&ff, header, sizeof(*header)) < 0)
T
Tom Zanussi 已提交
2748 2749
		return -1;

2750 2751 2752
	return 0;
}

2753
static int perf_header__read_pipe(struct perf_session *session)
2754
{
2755
	struct perf_header *header = &session->header;
2756 2757
	struct perf_pipe_file_header f_header;

2758
	if (perf_file_header__read_pipe(&f_header, header,
2759
					perf_data__fd(session->data),
T
Tom Zanussi 已提交
2760
					session->repipe) < 0) {
2761 2762 2763 2764 2765 2766 2767
		pr_debug("incompatible file format\n");
		return -EINVAL;
	}

	return 0;
}

2768 2769 2770 2771 2772 2773
static int read_attr(int fd, struct perf_header *ph,
		     struct perf_file_attr *f_attr)
{
	struct perf_event_attr *attr = &f_attr->attr;
	size_t sz, left;
	size_t our_sz = sizeof(f_attr->attr);
2774
	ssize_t ret;
2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787

	memset(f_attr, 0, sizeof(*f_attr));

	/* read minimal guaranteed structure */
	ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
	if (ret <= 0) {
		pr_debug("cannot read %d bytes of header attr\n",
			 PERF_ATTR_SIZE_VER0);
		return -1;
	}

	/* on file perf_event_attr size */
	sz = attr->size;
2788

2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811 2812 2813
	if (ph->needs_swap)
		sz = bswap_32(sz);

	if (sz == 0) {
		/* assume ABI0 */
		sz =  PERF_ATTR_SIZE_VER0;
	} else if (sz > our_sz) {
		pr_debug("file uses a more recent and unsupported ABI"
			 " (%zu bytes extra)\n", sz - our_sz);
		return -1;
	}
	/* what we have not yet read and that we know about */
	left = sz - PERF_ATTR_SIZE_VER0;
	if (left) {
		void *ptr = attr;
		ptr += PERF_ATTR_SIZE_VER0;

		ret = readn(fd, ptr, left);
	}
	/* read perf_file_section, ids are read in caller */
	ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));

	return ret <= 0 ? -1 : 0;
}

2814 2815
static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
						struct pevent *pevent)
2816
{
2817
	struct event_format *event;
2818 2819
	char bf[128];

2820 2821 2822 2823
	/* already prepared */
	if (evsel->tp_format)
		return 0;

2824 2825 2826 2827 2828
	if (pevent == NULL) {
		pr_debug("broken or missing trace data\n");
		return -1;
	}

2829
	event = pevent_find_event(pevent, evsel->attr.config);
2830 2831
	if (event == NULL) {
		pr_debug("cannot find event format for %d\n", (int)evsel->attr.config);
2832
		return -1;
2833
	}
2834

2835 2836 2837 2838 2839 2840
	if (!evsel->name) {
		snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
		evsel->name = strdup(bf);
		if (evsel->name == NULL)
			return -1;
	}
2841

2842
	evsel->tp_format = event;
2843 2844 2845
	return 0;
}

2846 2847
static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
						  struct pevent *pevent)
2848 2849 2850
{
	struct perf_evsel *pos;

2851
	evlist__for_each_entry(evlist, pos) {
2852 2853
		if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
		    perf_evsel__prepare_tracepoint_event(pos, pevent))
2854 2855 2856 2857 2858 2859
			return -1;
	}

	return 0;
}

2860
int perf_session__read_header(struct perf_session *session)
2861
{
2862
	struct perf_data *data = session->data;
2863
	struct perf_header *header = &session->header;
2864
	struct perf_file_header	f_header;
2865 2866 2867
	struct perf_file_attr	f_attr;
	u64			f_id;
	int nr_attrs, nr_ids, i, j;
2868
	int fd = perf_data__fd(data);
2869

2870
	session->evlist = perf_evlist__new();
2871 2872 2873
	if (session->evlist == NULL)
		return -ENOMEM;

2874
	session->evlist->env = &header->env;
2875
	session->machines.host.env = &header->env;
2876
	if (perf_data__is_pipe(data))
2877
		return perf_header__read_pipe(session);
2878

2879
	if (perf_file_header__read(&f_header, header, fd) < 0)
2880
		return -EINVAL;
2881

2882 2883 2884 2885 2886 2887 2888 2889 2890
	/*
	 * Sanity check that perf.data was written cleanly; data size is
	 * initialized to 0 and updated only if the on_exit function is run.
	 * If data size is still 0 then the file contains only partial
	 * information.  Just warn user and process it as much as it can.
	 */
	if (f_header.data.size == 0) {
		pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
			   "Was the 'perf record' command properly terminated?\n",
J
Jiri Olsa 已提交
2891
			   data->file.path);
2892 2893
	}

2894
	nr_attrs = f_header.attrs.size / f_header.attr_size;
2895 2896 2897
	lseek(fd, f_header.attrs.offset, SEEK_SET);

	for (i = 0; i < nr_attrs; i++) {
2898
		struct perf_evsel *evsel;
2899
		off_t tmp;
2900

2901
		if (read_attr(fd, header, &f_attr) < 0)
2902
			goto out_errno;
2903

2904 2905 2906
		if (header->needs_swap) {
			f_attr.ids.size   = bswap_64(f_attr.ids.size);
			f_attr.ids.offset = bswap_64(f_attr.ids.offset);
2907
			perf_event__attr_swap(&f_attr.attr);
2908
		}
2909

2910
		tmp = lseek(fd, 0, SEEK_CUR);
2911
		evsel = perf_evsel__new(&f_attr.attr);
2912

2913 2914
		if (evsel == NULL)
			goto out_delete_evlist;
2915 2916

		evsel->needs_swap = header->needs_swap;
2917 2918 2919 2920 2921
		/*
		 * Do it before so that if perf_evsel__alloc_id fails, this
		 * entry gets purged too at perf_evlist__delete().
		 */
		perf_evlist__add(session->evlist, evsel);
2922 2923

		nr_ids = f_attr.ids.size / sizeof(u64);
2924 2925 2926 2927 2928 2929 2930 2931
		/*
		 * We don't have the cpu and thread maps on the header, so
		 * for allocating the perf_sample_id table we fake 1 cpu and
		 * hattr->ids threads.
		 */
		if (perf_evsel__alloc_id(evsel, 1, nr_ids))
			goto out_delete_evlist;

2932 2933 2934
		lseek(fd, f_attr.ids.offset, SEEK_SET);

		for (j = 0; j < nr_ids; j++) {
2935
			if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
2936
				goto out_errno;
2937

2938
			perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
2939
		}
2940

2941 2942 2943
		lseek(fd, tmp, SEEK_SET);
	}

2944 2945
	symbol_conf.nr_events = nr_attrs;

J
Jiri Olsa 已提交
2946
	perf_header__process_sections(header, fd, &session->tevent,
2947
				      perf_file_section__process);
2948

2949
	if (perf_evlist__prepare_tracepoint_events(session->evlist,
J
Jiri Olsa 已提交
2950
						   session->tevent.pevent))
2951 2952
		goto out_delete_evlist;

2953
	return 0;
2954 2955
out_errno:
	return -errno;
2956 2957 2958 2959 2960

out_delete_evlist:
	perf_evlist__delete(session->evlist);
	session->evlist = NULL;
	return -ENOMEM;
2961
}
2962

2963
int perf_event__synthesize_attr(struct perf_tool *tool,
2964
				struct perf_event_attr *attr, u32 ids, u64 *id,
2965
				perf_event__handler_t process)
2966
{
2967
	union perf_event *ev;
2968 2969 2970 2971
	size_t size;
	int err;

	size = sizeof(struct perf_event_attr);
2972
	size = PERF_ALIGN(size, sizeof(u64));
2973 2974 2975 2976 2977
	size += sizeof(struct perf_event_header);
	size += ids * sizeof(u64);

	ev = malloc(size);

2978 2979 2980
	if (ev == NULL)
		return -ENOMEM;

2981 2982 2983 2984
	ev->attr.attr = *attr;
	memcpy(ev->attr.id, id, ids * sizeof(u64));

	ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
2985
	ev->attr.header.size = (u16)size;
2986

2987 2988 2989 2990
	if (ev->attr.header.size == size)
		err = process(tool, ev, NULL, NULL);
	else
		err = -E2BIG;
2991 2992 2993 2994 2995 2996

	free(ev);

	return err;
}

2997 2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054 3055 3056 3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093
int perf_event__synthesize_features(struct perf_tool *tool,
				    struct perf_session *session,
				    struct perf_evlist *evlist,
				    perf_event__handler_t process)
{
	struct perf_header *header = &session->header;
	struct feat_fd ff;
	struct feature_event *fe;
	size_t sz, sz_hdr;
	int feat, ret;

	sz_hdr = sizeof(fe->header);
	sz = sizeof(union perf_event);
	/* get a nice alignment */
	sz = PERF_ALIGN(sz, page_size);

	memset(&ff, 0, sizeof(ff));

	ff.buf = malloc(sz);
	if (!ff.buf)
		return -ENOMEM;

	ff.size = sz - sz_hdr;

	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
		if (!feat_ops[feat].synthesize) {
			pr_debug("No record header feature for header :%d\n", feat);
			continue;
		}

		ff.offset = sizeof(*fe);

		ret = feat_ops[feat].write(&ff, evlist);
		if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
			pr_debug("Error writing feature\n");
			continue;
		}
		/* ff.buf may have changed due to realloc in do_write() */
		fe = ff.buf;
		memset(fe, 0, sizeof(*fe));

		fe->feat_id = feat;
		fe->header.type = PERF_RECORD_HEADER_FEATURE;
		fe->header.size = ff.offset;

		ret = process(tool, ff.buf, NULL, NULL);
		if (ret) {
			free(ff.buf);
			return ret;
		}
	}
	free(ff.buf);
	return 0;
}

/*
 * Handle a PERF_RECORD_HEADER_FEATURE event in pipe mode: validate the
 * feature id, run its ->process() handler on the inline payload, and
 * optionally print it depending on tool->show_feat_hdr.
 */
int perf_event__process_feature(struct perf_tool *tool,
				union perf_event *event,
				struct perf_session *session __maybe_unused)
{
	struct feat_fd ff = { .fd = 0 };
	struct feature_event *fe = (struct feature_event *)event;
	int type = fe->header.type;
	u64 feat = fe->feat_id;

	if (type < 0 || type >= PERF_RECORD_HEADER_MAX) {
		pr_warning("invalid record type %d in pipe-mode\n", type);
		return 0;
	}
	/*
	 * Fix: the bound must be inclusive (>=); feat == HEADER_LAST_FEATURE
	 * previously slipped through and indexed feat_ops[] out of bounds.
	 */
	if (feat == HEADER_RESERVED || feat >= HEADER_LAST_FEATURE) {
		pr_warning("invalid record type %d in pipe-mode\n", type);
		return -1;
	}

	if (!feat_ops[feat].process)
		return 0;

	/* the payload follows the feature_event header in the same record */
	ff.buf  = (void *)fe->data;
	ff.size = event->header.size - sizeof(event->header);
	ff.ph = &session->header;

	if (feat_ops[feat].process(&ff, NULL))
		return -1;

	if (!feat_ops[feat].print || !tool->show_feat_hdr)
		return 0;

	if (!feat_ops[feat].full_only ||
	    tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) {
		feat_ops[feat].print(&ff, stdout);
	} else {
		fprintf(stdout, "# %s info available, use -I to display\n",
			feat_ops[feat].name);
	}

	return 0;
}

3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130
static struct event_update_event *
event_update_event__new(size_t size, u64 type, u64 id)
{
	struct event_update_event *ev;

	size += sizeof(*ev);
	size  = PERF_ALIGN(size, sizeof(u64));

	ev = zalloc(size);
	if (ev) {
		ev->header.type = PERF_RECORD_EVENT_UPDATE;
		ev->header.size = (u16)size;
		ev->type = type;
		ev->id = id;
	}
	return ev;
}

/* Emit an EVENT_UPDATE record carrying the evsel's unit string. */
int
perf_event__synthesize_event_update_unit(struct perf_tool *tool,
					 struct perf_evsel *evsel,
					 perf_event__handler_t process)
{
	size_t len = strlen(evsel->unit);
	struct event_update_event *ev;
	int err;

	ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]);
	if (!ev)
		return -ENOMEM;

	/* the buffer is zeroed, so the terminating NUL is already there */
	strncpy(ev->data, evsel->unit, len);
	err = process(tool, (union perf_event *)ev, NULL, NULL);
	free(ev);
	return err;
}

3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150
int
perf_event__synthesize_event_update_scale(struct perf_tool *tool,
					  struct perf_evsel *evsel,
					  perf_event__handler_t process)
{
	struct event_update_event *ev;
	struct event_update_event_scale *ev_data;
	int err;

	ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]);
	if (ev == NULL)
		return -ENOMEM;

	ev_data = (struct event_update_event_scale *) ev->data;
	ev_data->scale = evsel->scale;
	err = process(tool, (union perf_event*) ev, NULL, NULL);
	free(ev);
	return err;
}

3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168
int
perf_event__synthesize_event_update_name(struct perf_tool *tool,
					 struct perf_evsel *evsel,
					 perf_event__handler_t process)
{
	struct event_update_event *ev;
	size_t len = strlen(evsel->name);
	int err;

	ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]);
	if (ev == NULL)
		return -ENOMEM;

	strncpy(ev->data, evsel->name, len);
	err = process(tool, (union perf_event*) ev, NULL, NULL);
	free(ev);
	return err;
}
3169

3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200
int
perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
					struct perf_evsel *evsel,
					perf_event__handler_t process)
{
	size_t size = sizeof(struct event_update_event);
	struct event_update_event *ev;
	int max, err;
	u16 type;

	if (!evsel->own_cpus)
		return 0;

	ev = cpu_map_data__alloc(evsel->own_cpus, &size, &type, &max);
	if (!ev)
		return -ENOMEM;

	ev->header.type = PERF_RECORD_EVENT_UPDATE;
	ev->header.size = (u16)size;
	ev->type = PERF_EVENT_UPDATE__CPUS;
	ev->id   = evsel->id[0];

	cpu_map_data__synthesize((struct cpu_map_data *) ev->data,
				 evsel->own_cpus,
				 type, max);

	err = process(tool, (union perf_event*) ev, NULL, NULL);
	free(ev);
	return err;
}

3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238
size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
{
	struct event_update_event *ev = &event->event_update;
	struct event_update_event_scale *ev_scale;
	struct event_update_event_cpus *ev_cpus;
	struct cpu_map *map;
	size_t ret;

	ret = fprintf(fp, "\n... id:    %" PRIu64 "\n", ev->id);

	switch (ev->type) {
	case PERF_EVENT_UPDATE__SCALE:
		ev_scale = (struct event_update_event_scale *) ev->data;
		ret += fprintf(fp, "... scale: %f\n", ev_scale->scale);
		break;
	case PERF_EVENT_UPDATE__UNIT:
		ret += fprintf(fp, "... unit:  %s\n", ev->data);
		break;
	case PERF_EVENT_UPDATE__NAME:
		ret += fprintf(fp, "... name:  %s\n", ev->data);
		break;
	case PERF_EVENT_UPDATE__CPUS:
		ev_cpus = (struct event_update_event_cpus *) ev->data;
		ret += fprintf(fp, "... ");

		map = cpu_map__new_data(&ev_cpus->cpus);
		if (map)
			ret += cpu_map__fprintf(map, fp);
		else
			ret += fprintf(fp, "failed to get cpus\n");
		break;
	default:
		ret += fprintf(fp, "... unknown type\n");
		break;
	}

	return ret;
}
3239

3240
int perf_event__synthesize_attrs(struct perf_tool *tool,
3241
				   struct perf_session *session,
3242
				   perf_event__handler_t process)
3243
{
3244
	struct perf_evsel *evsel;
3245
	int err = 0;
3246

3247
	evlist__for_each_entry(session->evlist, evsel) {
3248 3249
		err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids,
						  evsel->id, process);
3250 3251 3252 3253 3254 3255 3256 3257 3258
		if (err) {
			pr_debug("failed to create perf header attribute\n");
			return err;
		}
	}

	return err;
}

/* True when the counter carries a non-empty unit string. */
static bool has_unit(struct perf_evsel *counter)
{
	return counter->unit != NULL && counter->unit[0] != '\0';
}

/* True when the counter uses a non-default scale factor. */
static bool has_scale(struct perf_evsel *counter)
{
	return counter->scale != 1;
}

/*
 * Synthesize the evsel attributes that the plain attr event does not
 * carry — unit, scale, cpu map and (for pipe output) name — for every
 * supported counter in @evsel_list.
 */
int perf_event__synthesize_extra_attr(struct perf_tool *tool,
				      struct perf_evlist *evsel_list,
				      perf_event__handler_t process,
				      bool is_pipe)
{
	struct perf_evsel *evsel;
	int err;

	evlist__for_each_entry(evsel_list, evsel) {
		if (!evsel->supported)
			continue;

		/* unit and scale are synthesized only when defined */
		if (has_unit(evsel)) {
			err = perf_event__synthesize_event_update_unit(tool, evsel, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel unit.\n");
				return err;
			}
		}

		if (has_scale(evsel)) {
			err = perf_event__synthesize_event_update_scale(tool, evsel, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel counter.\n");
				return err;
			}
		}

		if (evsel->own_cpus) {
			err = perf_event__synthesize_event_update_cpus(tool, evsel, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel cpus.\n");
				return err;
			}
		}

		/*
		 * Name is needed only for pipe output,
		 * perf.data carries event names.
		 */
		if (is_pipe) {
			err = perf_event__synthesize_event_update_name(tool, evsel, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel name.\n");
				return err;
			}
		}
	}
	return 0;
}

3327 3328
int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
3329
			     struct perf_evlist **pevlist)
3330
{
3331
	u32 i, ids, n_ids;
3332
	struct perf_evsel *evsel;
3333
	struct perf_evlist *evlist = *pevlist;
3334

3335
	if (evlist == NULL) {
3336
		*pevlist = evlist = perf_evlist__new();
3337
		if (evlist == NULL)
3338 3339 3340
			return -ENOMEM;
	}

3341
	evsel = perf_evsel__new(&event->attr.attr);
3342
	if (evsel == NULL)
3343 3344
		return -ENOMEM;

3345
	perf_evlist__add(evlist, evsel);
3346

3347 3348
	ids = event->header.size;
	ids -= (void *)&event->attr.id - (void *)event;
3349
	n_ids = ids / sizeof(u64);
3350 3351 3352 3353 3354 3355 3356
	/*
	 * We don't have the cpu and thread maps on the header, so
	 * for allocating the perf_sample_id table we fake 1 cpu and
	 * hattr->ids threads.
	 */
	if (perf_evsel__alloc_id(evsel, 1, n_ids))
		return -ENOMEM;
3357 3358

	for (i = 0; i < n_ids; i++) {
3359
		perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
3360 3361
	}

3362 3363
	symbol_conf.nr_events = evlist->nr_entries;

3364 3365
	return 0;
}
3366

3367 3368 3369 3370 3371
int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_evlist **pevlist)
{
	struct event_update_event *ev = &event->event_update;
3372
	struct event_update_event_scale *ev_scale;
3373
	struct event_update_event_cpus *ev_cpus;
3374 3375
	struct perf_evlist *evlist;
	struct perf_evsel *evsel;
3376
	struct cpu_map *map;
3377 3378 3379 3380 3381 3382 3383 3384 3385 3386

	if (!pevlist || *pevlist == NULL)
		return -EINVAL;

	evlist = *pevlist;

	evsel = perf_evlist__id2evsel(evlist, ev->id);
	if (evsel == NULL)
		return -EINVAL;

3387 3388 3389
	switch (ev->type) {
	case PERF_EVENT_UPDATE__UNIT:
		evsel->unit = strdup(ev->data);
3390
		break;
3391 3392 3393
	case PERF_EVENT_UPDATE__NAME:
		evsel->name = strdup(ev->data);
		break;
3394 3395 3396
	case PERF_EVENT_UPDATE__SCALE:
		ev_scale = (struct event_update_event_scale *) ev->data;
		evsel->scale = ev_scale->scale;
3397
		break;
3398 3399 3400 3401 3402 3403 3404 3405
	case PERF_EVENT_UPDATE__CPUS:
		ev_cpus = (struct event_update_event_cpus *) ev->data;

		map = cpu_map__new_data(&ev_cpus->cpus);
		if (map)
			evsel->own_cpus = map;
		else
			pr_err("failed to get event_update cpus\n");
3406 3407 3408 3409
	default:
		break;
	}

3410 3411 3412
	return 0;
}

3413
int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
3414
					struct perf_evlist *evlist,
3415
					perf_event__handler_t process)
3416
{
3417
	union perf_event ev;
J
Jiri Olsa 已提交
3418
	struct tracing_data *tdata;
3419
	ssize_t size = 0, aligned_size = 0, padding;
3420
	struct feat_fd ff;
3421
	int err __maybe_unused = 0;
3422

J
Jiri Olsa 已提交
3423 3424 3425 3426 3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437
	/*
	 * We are going to store the size of the data followed
	 * by the data contents. Since the fd descriptor is a pipe,
	 * we cannot seek back to store the size of the data once
	 * we know it. Instead we:
	 *
	 * - write the tracing data to the temp file
	 * - get/write the data size to pipe
	 * - write the tracing data from the temp file
	 *   to the pipe
	 */
	tdata = tracing_data_get(&evlist->entries, fd, true);
	if (!tdata)
		return -1;

3438 3439 3440
	memset(&ev, 0, sizeof(ev));

	ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
J
Jiri Olsa 已提交
3441
	size = tdata->size;
3442
	aligned_size = PERF_ALIGN(size, sizeof(u64));
3443 3444 3445 3446
	padding = aligned_size - size;
	ev.tracing_data.header.size = sizeof(ev.tracing_data);
	ev.tracing_data.size = aligned_size;

3447
	process(tool, &ev, NULL, NULL);
3448

J
Jiri Olsa 已提交
3449 3450 3451 3452 3453 3454
	/*
	 * The put function will copy all the tracing data
	 * stored in temp file to the pipe.
	 */
	tracing_data_put(tdata);

3455 3456
	ff = (struct feat_fd){ .fd = fd };
	if (write_padded(&ff, NULL, 0, padding))
3457
		return -1;
3458 3459 3460 3461

	return aligned_size;
}

3462 3463
int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
3464
				     struct perf_session *session)
3465
{
3466
	ssize_t size_read, padding, size = event->tracing_data.size;
3467
	int fd = perf_data__fd(session->data);
3468
	off_t offset = lseek(fd, 0, SEEK_CUR);
3469 3470 3471
	char buf[BUFSIZ];

	/* setup for reading amidst mmap */
3472
	lseek(fd, offset + sizeof(struct tracing_data_event),
3473 3474
	      SEEK_SET);

J
Jiri Olsa 已提交
3475
	size_read = trace_report(fd, &session->tevent,
3476
				 session->repipe);
3477
	padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;
3478

3479
	if (readn(fd, buf, padding) < 0) {
3480 3481 3482
		pr_err("%s: reading input file", __func__);
		return -1;
	}
T
Tom Zanussi 已提交
3483 3484
	if (session->repipe) {
		int retw = write(STDOUT_FILENO, buf, padding);
3485 3486 3487 3488
		if (retw <= 0 || retw != padding) {
			pr_err("%s: repiping tracing data padding", __func__);
			return -1;
		}
T
Tom Zanussi 已提交
3489
	}
3490

3491 3492 3493 3494
	if (size_read + padding != size) {
		pr_err("%s: tracing data size mismatch", __func__);
		return -1;
	}
3495

3496
	perf_evlist__prepare_tracepoint_events(session->evlist,
J
Jiri Olsa 已提交
3497
					       session->tevent.pevent);
3498

3499 3500
	return size_read + padding;
}
3501

3502
int perf_event__synthesize_build_id(struct perf_tool *tool,
3503
				    struct dso *pos, u16 misc,
3504
				    perf_event__handler_t process,
3505
				    struct machine *machine)
3506
{
3507
	union perf_event ev;
3508 3509 3510 3511 3512 3513 3514 3515 3516
	size_t len;
	int err = 0;

	if (!pos->hit)
		return err;

	memset(&ev, 0, sizeof(ev));

	len = pos->long_name_len + 1;
3517
	len = PERF_ALIGN(len, NAME_ALIGN);
3518 3519 3520
	memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
	ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
	ev.build_id.header.misc = misc;
3521
	ev.build_id.pid = machine->pid;
3522 3523 3524
	ev.build_id.header.size = sizeof(ev.build_id) + len;
	memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);

3525
	err = process(tool, &ev, NULL, machine);
3526 3527 3528 3529

	return err;
}

3530
int perf_event__process_build_id(struct perf_tool *tool __maybe_unused,
3531
				 union perf_event *event,
3532
				 struct perf_session *session)
3533
{
3534 3535
	__event_process_build_id(&event->build_id,
				 event->build_id.filename,
3536
				 session);
3537 3538
	return 0;
}