/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <lk/debugfs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include <unistd.h>

#include "parse-events.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}

/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos.  For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}
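
/*
 * Illustrative note (not in the original file): id_pos is the index, in
 * u64 words from the start of a PERF_RECORD_SAMPLE, at which the event ID
 * lives; is_pos is the index from the end of a non-sample record's
 * sample_id_all trailer.  perf_evlist__event2id() below uses both to map
 * an event back to its evsel.
 */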

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	if (!evlist->nr_entries++)
		perf_evlist__set_id_pos(evlist);
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	bool set_id_pos = !evlist->nr_entries;

	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
	if (set_id_pos)
		perf_evlist__set_id_pos(evlist);
}

void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	list_for_each_entry(evsel, list, node) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}
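
/*
 * Illustrative note (not in the original file): for an evlist holding two
 * evsels with consecutive idx values, perf_evlist__set_leader() leaves
 *
 *	perf_evlist__first(evlist)->nr_members == 2
 *	evlist->nr_groups == 1
 *
 * i.e. the whole list becomes a single group led by its first event.
 */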

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}
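/*
 * Usage sketch (illustrative, not part of the original file): the minimal
 * way to get a working event list with one hardware cycles counter:
 *
 *	struct perf_evlist *evlist = perf_evlist__new();
 *
 *	if (evlist == NULL || perf_evlist__add_default(evlist))
 *		return -ENOMEM;
 */
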
static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->attr.type   == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel;

	evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
	if (evsel == NULL)
		return -1;

	evsel->handler.func = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}
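
/*
 * Usage sketch (illustrative, not part of the original file): wire up a
 * tracepoint to a callback; "trace__sched_switch" is a hypothetical
 * handler passed through the void *handler argument above:
 *
 *	if (perf_evlist__add_newtp(evlist, "sched", "sched_switch",
 *				   trace__sched_switch))
 *		return -1;
 */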

void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos))
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos))
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}

static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = nr_cpus * nr_threads * evlist->nr_entries;
	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
296 297 298 299 300 301 302 303 304 305
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id.. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

 add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}

struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}

static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
						   union perf_event *event)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return perf_evlist__first(evlist);

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &md->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}
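
/*
 * Consumer sketch (illustrative, not part of the original file): drain all
 * currently queued events, assuming the evlist was mmapped with
 * overwrite == false so the tail is advanced for the kernel:
 *
 *	union perf_event *event;
 *	int i;
 *
 *	for (i = 0; i < evlist->nr_mmaps; i++)
 *		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL)
 *			... handle it, e.g. via perf_evlist__parse_sample() ...
 */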

static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
	if (evlist->mmap[idx].base != NULL) {
		munmap(evlist->mmap[idx].base, evlist->mmap_len);
		evlist->mmap[idx].base = NULL;
	}
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++)
		__perf_evlist__munmap(evlist, i);

	free(evlist->mmap);
	evlist->mmap = NULL;
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < nr_cpus; cpu++)
		__perf_evlist__munmap(evlist, cpu);
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < nr_threads; thread++)
		__perf_evlist__munmap(evlist, thread);
	return -1;
}

/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__read_on_cpu does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return -EINVAL;

	mask = pages * page_size - 1;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}
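
/*
 * Setup sketch (illustrative, not part of the original file): the usual
 * calling order for the functions in this file, error handling elided:
 *
 *	perf_evlist__create_maps(evlist, &target);
 *	perf_evlist__open(evlist);
 *	perf_evlist__mmap(evlist, pages, false);
 *
 * where pages is a power of two, or UINT_MAX to get the 512 kiB default
 * computed above.
 */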

int perf_evlist__create_maps(struct perf_evlist *evlist,
			     struct perf_target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (perf_target__has_task(target))
		evlist->cpus = cpu_map__dummy_new();
	else if (!perf_target__has_cpu(target) && !target->uses_mmap)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus	= NULL;
	evlist->threads = NULL;
}
int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->filter == NULL)
			continue;

		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
		if (err)
			break;
	}

	return err;
}

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	if (evlist->nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	list_for_each_entry(pos, &evlist->entries, node) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}

u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	list_for_each_entry(evsel, &evlist->entries, node)
		evlist->combined_sample_type |= evsel->attr.sample_type;

	return evlist->combined_sample_type;
}

u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __perf_evlist__combined_sample_type(evlist);
}

bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.read_format;
}

u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}

bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}
void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int ncpus = cpu_map__nr(evlist->cpus);
	int nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);
}

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	perf_evlist__close(evlist);
	errno = -err;
	return err;
}

int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct perf_target *target,
				  const char *argv[], bool pipe_output,
				  bool want_signal)
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		perror(argv[0]);
		if (want_signal)
			kill(getppid(), SIGUSR1);
		exit(-1);
	}

	if (perf_target__none(target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}
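
/*
 * Workload sketch (illustrative, not part of the original file): the child
 * forked by perf_evlist__prepare_workload() blocks on the "go" pipe, so
 * counters can be opened and enabled on its pid before it exec()s:
 *
 *	perf_evlist__prepare_workload(evlist, &target, argv, false, true);
 *	perf_evlist__open(evlist);
 *	perf_evlist__enable(evlist);
 *	perf_evlist__start_workload(evlist);
 */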

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	list_for_each_entry(evsel, &evlist->entries, node) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}