evlist.c

/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <lk/debugfs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include <unistd.h>

#include "parse-events.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}
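
/*
 * Rough sketch of the usual call sequence a tool goes through with this API
 * (error handling trimmed; "target" stands for a struct perf_target the
 * caller has already set up, and "out_delete" is a hypothetical label):
 *
 *	struct perf_evlist *evlist = perf_evlist__new();
 *
 *	if (evlist == NULL ||
 *	    perf_evlist__add_default(evlist) < 0 ||
 *	    perf_evlist__create_maps(evlist, &target) < 0 ||
 *	    perf_evlist__open(evlist) < 0 ||
 *	    perf_evlist__mmap(evlist, UINT_MAX, false) < 0)
 *		goto out_delete;
 *
 *	... consume events, see perf_evlist__mmap_read() below ...
 *
 *	perf_evlist__munmap(evlist);
 *	perf_evlist__close(evlist);
 *	perf_evlist__delete_maps(evlist);
 *	perf_evlist__delete(evlist);
 */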

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	++evlist->nr_entries;
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
}

void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	list_for_each_entry(evsel, list, node) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}
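
/*
 * For example, if the parser produced a three event group such as
 * "{cycles,instructions,branches}", the first evsel becomes the leader:
 * leader->nr_members ends up as 3 and every member's ->leader points at it,
 * which is what perf_evsel__is_group_leader() tests against.
 */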

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->attr.type   == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel;

	evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
	if (evsel == NULL)
		return -1;

	evsel->handler.func = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}
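
/*
 * Example (a sketch): the tracepoint name and the "process_switch" handler
 * are hypothetical, and the handler type is whatever the caller expects to
 * find in ->handler.func later on:
 *
 *	perf_evlist__add_newtp(evlist, "sched", "sched_switch", process_switch);
 */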

void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos))
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos))
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}

static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = nr_cpus * nr_threads * evlist->nr_entries;
	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}
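
/*
 * The pollfd array filled above is meant to be handed straight to poll(2) by
 * the caller; a minimal sketch (the timeout value is arbitrary):
 *
 *	if (poll(evlist->pollfd, evlist->nr_fds, 100) > 0) {
 *		... at least one ring buffer has data, drain it with
 *		... perf_evlist__mmap_read()
 *	}
 */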

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id.. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

 add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}

struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}
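
/*
 * Typical use when demultiplexing events that carry PERF_SAMPLE_ID: after
 * parsing the sample, map it back to the evsel that produced it (a sketch):
 *
 *	struct perf_evsel *evsel = perf_evlist__id2evsel(evlist, sample.id);
 *
 *	if (evsel == NULL)
 *		... id from an unknown source, drop or warn ...
 */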

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &md->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}
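
/*
 * Sketch of the usual consumer loop over all mmap'ed buffers ("deliver" is a
 * hypothetical callback):
 *
 *	union perf_event *event;
 *	int i;
 *
 *	for (i = 0; i < evlist->nr_mmaps; i++) {
 *		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL)
 *			deliver(event);
 *	}
 */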

static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
	if (evlist->mmap[idx].base != NULL) {
		munmap(evlist->mmap[idx].base, evlist->mmap_len);
		evlist->mmap[idx].base = NULL;
	}
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++)
		__perf_evlist__munmap(evlist, i);

	free(evlist->mmap);
	evlist->mmap = NULL;
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < nr_cpus; cpu++)
		__perf_evlist__munmap(evlist, cpu);
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < nr_threads; thread++)
		__perf_evlist__munmap(evlist, thread);
	return -1;
}

/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__mmap_read does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return -EINVAL;

	mask = pages * page_size - 1;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}
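
/*
 * E.g. a record style caller would do something like (a sketch; "mmap_pages"
 * stands for whatever page count the user asked for):
 *
 *	if (perf_evlist__mmap(evlist, mmap_pages, false) < 0)
 *		... bail out ...
 *
 * Passing UINT_MAX for pages selects the 512 kiB default computed above.
 */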

int perf_evlist__create_maps(struct perf_evlist *evlist,
			     struct perf_target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (perf_target__has_task(target))
		evlist->cpus = cpu_map__dummy_new();
	else if (!perf_target__has_cpu(target) && !target->uses_mmap)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus	= NULL;
	evlist->threads = NULL;
}

int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->filter == NULL)
			continue;

		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
		if (err)
			break;
	}

	return err;
}

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_type != pos->attr.sample_type)
			return false;
	}

	return true;
}

u64 perf_evlist__sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_type;
}

bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.read_format;
}

u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;
out:
	return size;
}

bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int ncpus = cpu_map__nr(evlist->cpus);
	int nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);
}

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	perf_evlist__close(evlist);
	errno = -err;
	return err;
}

int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct perf_target *target,
				  const char *argv[], bool pipe_output,
				  bool want_signal)
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		perror(argv[0]);
		if (want_signal)
			kill(getppid(), SIGUSR1);
		exit(-1);
	}

	if (perf_target__none(target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
874 875 876 877 878 879
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("enable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}
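
/*
 * Roughly how callers drive the workload helpers (a sketch): the workload is
 * forked and corked after the thread map exists, so its pid can be dropped
 * into that map, and it is only uncorked once the counters are set up:
 *
 *	perf_evlist__create_maps(evlist, &target);
 *	perf_evlist__prepare_workload(evlist, &target, argv, false, true);
 *	... perf_evlist__open(), perf_evlist__mmap(), enable the events ...
 *	perf_evlist__start_workload(evlist);
 */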

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__first(evlist);
	return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	list_for_each_entry(evsel, &evlist->entries, node) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}