/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <lk/debugfs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include <unistd.h>

#include "parse-events.h"
#include "parse-options.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

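/*
 * Per-event bookkeeping lives in xyarrays indexed by (cpu, thread):
 * FD() yields the open perf fd, SID() the struct perf_sample_id slot.
 */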
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}

struct perf_evlist *perf_evlist__new_default(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist && perf_evlist__add_default(evlist)) {
		perf_evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos.  For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node)
		perf_evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	if (!evlist->nr_entries++)
		perf_evlist__set_id_pos(evlist);
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	bool set_id_pos = !evlist->nr_entries;

	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
	if (set_id_pos)
		perf_evlist__set_id_pos(evlist);
}

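/* Make the first event on 'list' the group leader of all the others. */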
void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	list_for_each_entry(evsel, list, node) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->attr.type   == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel;

	evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
	if (evsel == NULL)
		return -1;

	evsel->handler.func = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}

int perf_evlist__disable_event(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	int cpu, thread, err;

	if (!evsel->fd)
		return 0;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		for (thread = 0; thread < evlist->threads->nr; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_DISABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}

int perf_evlist__enable_event(struct perf_evlist *evlist,
			      struct perf_evsel *evsel)
{
	int cpu, thread, err;

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		for (thread = 0; thread < evlist->threads->nr; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_ENABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}

static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = nr_cpus * nr_threads * evlist->nr_entries;
	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}

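/*
 * Hash each sample id into evlist->heads[] so that perf_evlist__id2sid()
 * and friends can map an id found in a record back to its evsel.
 */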
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id.. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

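	/*
	 * read() returns: value, [time_enabled], [time_running], id -- so
	 * id_idx is bumped once for each enabled time field below.
	 */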
	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

 add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}

struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}

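/*
 * Extract the sample id from a raw record: for PERF_RECORD_SAMPLE it is
 * id_pos u64s from the start of the array, for other record types the
 * sample_id_all trailer places it is_pos u64s from the end.
 */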
static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
						   union perf_event *event)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->nr_entries == 1)
		return first;

	if (!first->attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *) md->event_copy;
		}

		old += size;
	}

	md->prev = old;

	return event;
}

void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{
	if (!evlist->overwrite) {
		struct perf_mmap *md = &evlist->mmap[idx];
		unsigned int old = md->prev;

		perf_mmap__write_tail(md, old);
	}
}

static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
	if (evlist->mmap[idx].base != NULL) {
		munmap(evlist->mmap[idx].base, evlist->mmap_len);
		evlist->mmap[idx].base = NULL;
	}
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++)
		__perf_evlist__munmap(evlist, i);

	free(evlist->mmap);
	evlist->mmap = NULL;
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
				       int prot, int mask, int cpu, int thread,
				       int *output)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		int fd = FD(evsel, cpu, thread);

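		/*
		 * The first event on this cpu/thread creates the mmap; the
		 * others redirect their output into it with
		 * PERF_EVENT_IOC_SET_OUTPUT so they share one ring buffer.
		 */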
		if (*output == -1) {
			*output = fd;
			if (__perf_evlist__mmap(evlist, idx, prot, mask,
						*output) < 0)
				return -1;
		} else {
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;
		}

		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
			return -1;
	}

	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot,
				     int mask)
{
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			if (perf_evlist__mmap_per_evsel(evlist, cpu, prot, mask,
							cpu, thread, &output))
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < nr_cpus; cpu++)
		__perf_evlist__munmap(evlist, cpu);
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot,
					int mask)
{
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;

		if (perf_evlist__mmap_per_evsel(evlist, thread, prot, mask, 0,
						thread, &output))
			goto out_unmap;
	}

	return 0;

out_unmap:
	for (thread = 0; thread < nr_threads; thread++)
		__perf_evlist__munmap(evlist, thread);
	return -1;
}

static size_t perf_evlist__mmap_size(unsigned long pages)
{
	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return 0;

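	/* One extra page for the kernel's ring-buffer header page. */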
	return (pages + 1) * page_size;
}

int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
				  int unset __maybe_unused)
{
	unsigned int *mmap_pages = opt->value;
	unsigned long pages, val;
	size_t size;
	static struct parse_tag tags[] = {
		{ .tag  = 'B', .mult = 1       },
		{ .tag  = 'K', .mult = 1 << 10 },
		{ .tag  = 'M', .mult = 1 << 20 },
		{ .tag  = 'G', .mult = 1 << 30 },
		{ .tag  = 0 },
	};

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
		if (pages < (1UL << 31) && !is_power_of_2(pages)) {
			pages = next_pow2(pages);
			pr_info("rounding mmap pages size to %lu (%lu pages)\n",
				pages * page_size, pages);
		}
	} else {
		/* we got pages count value */
		char *eptr;
		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0') {
			pr_err("failed to parse --mmap_pages/-m value\n");
			return -1;
		}
	}

	if (pages > UINT_MAX || pages > SIZE_MAX / page_size) {
		pr_err("--mmap_pages/-m value too big\n");
		return -1;
	}

	size = perf_evlist__mmap_size(pages);
	if (!size) {
		pr_err("--mmap_pages/-m value must be a power of two.");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}

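/*
 * Rough calling sequence (a sketch, other orders are possible):
 * perf_evlist__create_maps(), perf_evlist__open(), perf_evlist__mmap(),
 * perf_evlist__enable(), then drain with perf_evlist__mmap_read() and
 * perf_evlist__mmap_consume().
 */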
/**
 * perf_evlist__mmap - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 *
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail().  Using perf_evlist__mmap_consume() does this
 * automatically.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = perf_evlist__mmap_size(pages);
	pr_debug("mmap size %zuB\n", evlist->mmap_len);
	mask = evlist->mmap_len - page_size - 1;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}

int perf_evlist__create_maps(struct perf_evlist *evlist,
			     struct perf_target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (perf_target__has_task(target))
		evlist->cpus = cpu_map__dummy_new();
	else if (!perf_target__has_cpu(target) && !target->uses_mmap)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus	= NULL;
	evlist->threads = NULL;
}

int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->filter == NULL)
			continue;

		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
		if (err)
			break;
	}

	return err;
}

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	if (evlist->nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	list_for_each_entry(pos, &evlist->entries, node) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}

u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	list_for_each_entry(evsel, &evlist->entries, node)
		evlist->combined_sample_type |= evsel->attr.sample_type;

	return evlist->combined_sample_type;
}

u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __perf_evlist__combined_sample_type(evlist);
}

bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.read_format;
}

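/*
 * Size of the sample_id_all trailer the kernel appends to non-sample
 * records, derived from the first event's sample_type.
 */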
u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

       if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}

bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int ncpus = cpu_map__nr(evlist->cpus);
	int nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);
}

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	perf_evlist__update_id_pos(evlist);

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	perf_evlist__close(evlist);
	errno = -err;
	return err;
}

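/*
 * Fork the workload but keep it "corked": the child signals readiness over
 * child_ready_pipe, then blocks reading go_pipe until
 * perf_evlist__start_workload() writes to the other end.
 */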
int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct perf_target *target,
				  const char *argv[], bool pipe_output,
				  bool want_signal)
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		perror(argv[0]);
		if (want_signal)
			kill(getppid(), SIGUSR1);
		exit(-1);
	}

	if (perf_target__none(target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("enable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	list_for_each_entry(evsel, &evlist->entries, node) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}

int perf_evlist__strerror_tp(struct perf_evlist *evlist __maybe_unused,
			     int err, char *buf, size_t size)
{
	char sbuf[128];

	switch (err) {
	case ENOENT:
		scnprintf(buf, size, "%s",
			  "Error:\tUnable to find debugfs\n"
			  "Hint:\tWas your kernel was compiled with debugfs support?\n"
			  "Hint:\tIs the debugfs filesystem mounted?\n"
			  "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
		break;
	case EACCES:
		scnprintf(buf, size,
			  "Error:\tNo permissions to read %s/tracing/events/raw_syscalls\n"
			  "Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n",
			  debugfs_mountpoint, debugfs_mountpoint);
		break;
	default:
		scnprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf)));
		break;
	}

	return 0;
}

int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
			       int err, char *buf, size_t size)
{
	int printed, value;
	char sbuf[128], *emsg = strerror_r(err, sbuf, sizeof(sbuf));

	switch (err) {
	case EACCES:
	case EPERM:
		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);

		if (filename__read_int("/proc/sys/kernel/perf_event_paranoid", &value))
			break;

		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");

		if (value >= 2) {
			printed += scnprintf(buf + printed, size - printed,
					     "For your workloads it needs to be <= 1\nHint:\t");
		}
		printed += scnprintf(buf + printed, size - printed,
				     "For system wide tracing it needs to be set to -1");

		printed += scnprintf(buf + printed, size - printed,
				    ".\nHint:\tThe current value is %d.", value);
		break;
	default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}