/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <api/fs/fs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include <unistd.h>

#include "parse-events.h"
#include <subcmd/parse-options.h>

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/log2.h>
#include <linux/err.h>

static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx);
static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx);

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	fdarray__init(&evlist->pollfd, 64);
	evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}

struct perf_evlist *perf_evlist__new_default(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist && perf_evlist__add_default(evlist)) {
		perf_evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

struct perf_evlist *perf_evlist__new_dummy(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist && perf_evlist__add_dummy(evlist)) {
		perf_evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

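/*
 * A minimal lifecycle sketch using only functions from this file, with
 * error handling elided:
 *
 *	struct perf_evlist *evlist = perf_evlist__new_default();
 *
 *	if (evlist == NULL)
 *		return -ENOMEM;
 *	... use the evlist ...
 *	perf_evlist__delete(evlist);
 */
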
/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos.  For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel)
		perf_evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	evlist__for_each_safe(evlist, n, pos) {
		list_del_init(&pos->node);
		pos->evlist = NULL;
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	zfree(&evlist->mmap);
	fdarray__exit(&evlist->pollfd);
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__munmap(evlist);
	perf_evlist__close(evlist);
	cpu_map__put(evlist->cpus);
	thread_map__put(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
					  struct perf_evsel *evsel)
{
	/*
	 * We already have cpus for evsel (via PMU sysfs) so
	 * keep it, if there's no target cpu list defined.
	 */
	if (!evsel->own_cpus || evlist->has_user_cpus) {
		cpu_map__put(evsel->cpus);
		evsel->cpus = cpu_map__get(evlist->cpus);
	} else if (evsel->cpus != evsel->own_cpus) {
		cpu_map__put(evsel->cpus);
		evsel->cpus = cpu_map__get(evsel->own_cpus);
	}

	thread_map__put(evsel->threads);
	evsel->threads = thread_map__get(evlist->threads);
}

static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel)
		__perf_evlist__propagate_maps(evlist, evsel);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	entry->evlist = evlist;
	list_add_tail(&entry->node, &evlist->entries);
	entry->idx = evlist->nr_entries;
	entry->tracking = !entry->idx;

	if (!evlist->nr_entries++)
		perf_evlist__set_id_pos(evlist);

	__perf_evlist__propagate_maps(evlist, entry);
}

void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel)
{
	evsel->evlist = NULL;
	list_del_init(&evsel->node);
	evlist->nr_entries -= 1;
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list)
{
	struct perf_evsel *evsel, *temp;

	__evlist__for_each_safe(list, temp, evsel) {
		list_del_init(&evsel->node);
		perf_evlist__add(evlist, evsel);
	}
}

void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	__evlist__for_each(list, evsel) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}

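/*
 * Probe for the deepest precise_ip level the running kernel supports:
 * start at 3 (":ppp") and back off one level at a time until a
 * throwaway event opened on the current thread succeeds.
 */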
void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr)
{
	attr->precise_ip = 3;

	while (attr->precise_ip != 0) {
		int fd = sys_perf_event_open(attr, 0, -1, -1, 0);
		if (fd != -1) {
			close(fd);
			break;
		}
		--attr->precise_ip;
	}
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	perf_event_attr__set_max_precise_ip(&attr);

	evsel = perf_evsel__new(&attr);
	if (evsel == NULL)
		goto error;

	/* use asprintf() because free(evsel) assumes name is allocated */
	if (asprintf(&evsel->name, "cycles%.*s",
		     attr.precise_ip ? attr.precise_ip + 1 : 0, ":ppp") < 0)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

int perf_evlist__add_dummy(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_DUMMY,
		.size	= sizeof(attr), /* to capture ABI version */
	};
	struct perf_evsel *evsel = perf_evsel__new(&attr);

	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(evlist, evsel);
	return 0;
}

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head);

	return 0;

out_delete_partial_list:
	__evlist__for_each_safe(&head, n, evsel)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		if (evsel->attr.type   == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel = perf_evsel__newtp(sys, name);

	if (IS_ERR(evsel))
		return -1;

	evsel->handler = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}

static int perf_evlist__nr_threads(struct perf_evlist *evlist,
				   struct perf_evsel *evsel)
{
	if (evsel->system_wide)
		return 1;
	else
		return thread_map__nr(evlist->threads);
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	evlist__for_each(evlist, pos) {
		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
			continue;
		perf_evsel__disable(pos);
	}

	evlist->enabled = false;
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	evlist__for_each(evlist, pos) {
		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
			continue;
		perf_evsel__enable(pos);
	}

	evlist->enabled = true;
}

void perf_evlist__toggle_enable(struct perf_evlist *evlist)
{
	(evlist->enabled ? perf_evlist__disable : perf_evlist__enable)(evlist);
}

static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist,
					 struct perf_evsel *evsel, int cpu)
{
	int thread, err;
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->fd)
		return -EINVAL;

	for (thread = 0; thread < nr_threads; thread++) {
		err = ioctl(FD(evsel, cpu, thread),
			    PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__enable_event_thread(struct perf_evlist *evlist,
					    struct perf_evsel *evsel,
					    int thread)
{
	int cpu, err;
	int nr_cpus = cpu_map__nr(evlist->cpus);

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
				  struct perf_evsel *evsel, int idx)
{
	bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus);

	if (per_cpu_mmaps)
		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
	else
		return perf_evlist__enable_event_thread(evlist, evsel, idx);
}

int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = 0;
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		if (evsel->system_wide)
			nfds += nr_cpus;
		else
			nfds += nr_cpus * nr_threads;
	}

	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
	    fdarray__grow(&evlist->pollfd, nfds) < 0)
		return -ENOMEM;

	return 0;
}

static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, int idx)
{
	int pos = fdarray__add(&evlist->pollfd, fd, POLLIN | POLLERR | POLLHUP);
	/*
	 * Save the idx so that when we filter out fds POLLHUP'ed we can
	 * close the associated evlist->mmap[] entry.
	 */
	if (pos >= 0) {
		evlist->pollfd.priv[pos].idx = idx;

		fcntl(fd, F_SETFL, O_NONBLOCK);
	}

	return pos;
}

int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	return __perf_evlist__add_pollfd(evlist, fd, -1);
}

static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd)
{
	struct perf_evlist *evlist = container_of(fda, struct perf_evlist, pollfd);

	perf_evlist__mmap_put(evlist, fda->priv[fd].idx);
}

int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
{
	return fdarray__filter(&evlist->pollfd, revents_and_mask,
			       perf_evlist__munmap_filtered);
}

int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
{
	return fdarray__poll(&evlist->pollfd, timeout);
}

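/*
 * Sample ids are hashed into evlist->heads[] so that an id seen in a
 * sample can be mapped back to its evsel; see perf_evlist__id2sid().
 */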
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

int perf_evlist__id_add_fd(struct perf_evlist *evlist,
			   struct perf_evsel *evsel,
			   int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id.. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

 add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}

static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
				     struct perf_evsel *evsel, int idx, int cpu,
				     int thread)
{
	struct perf_sample_id *sid = SID(evsel, cpu, thread);
	sid->idx = idx;
	if (evlist->cpus && cpu >= 0)
		sid->cpu = evlist->cpus->map[cpu];
	else
		sid->cpu = -1;
	if (!evsel->system_wide && evlist->threads && thread >= 0)
		sid->tid = thread_map__pid(evlist->threads, thread);
	else
		sid->tid = -1;
}

struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1 || !id)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist,
						u64 id)
{
	struct perf_sample_id *sid;

	if (!id)
		return NULL;

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	return NULL;
}

static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
						   union perf_event *event)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->nr_entries == 1)
		return first;

	if (!first->attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	u64 head;
	u64 old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!atomic_read(&md->refcnt))
		return NULL;

	head = perf_mmap__read_head(md);
	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *) md->event_copy;
		}

		old += size;
	}

	md->prev = old;

	return event;
}

static bool perf_mmap__empty(struct perf_mmap *md)
{
	return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base;
}

static void perf_evlist__mmap_get(struct perf_evlist *evlist, int idx)
{
	atomic_inc(&evlist->mmap[idx].refcnt);
}

static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx)
{
	BUG_ON(atomic_read(&evlist->mmap[idx].refcnt) == 0);

	if (atomic_dec_and_test(&evlist->mmap[idx].refcnt))
		__perf_evlist__munmap(evlist, idx);
}

void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];

	if (!evlist->overwrite) {
		u64 old = md->prev;

		perf_mmap__write_tail(md, old);
	}

	if (atomic_read(&md->refcnt) == 1 && perf_mmap__empty(md))
		perf_evlist__mmap_put(evlist, idx);
}

int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(
			struct auxtrace_mmap_params *mp __maybe_unused,
			off_t auxtrace_offset __maybe_unused,
			unsigned int auxtrace_pages __maybe_unused,
			bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(
			struct auxtrace_mmap_params *mp __maybe_unused,
			struct perf_evlist *evlist __maybe_unused,
			int idx __maybe_unused,
			bool per_cpu __maybe_unused)
{
}

static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
	if (evlist->mmap[idx].base != NULL) {
		munmap(evlist->mmap[idx].base, evlist->mmap_len);
		evlist->mmap[idx].base = NULL;
		atomic_set(&evlist->mmap[idx].refcnt, 0);
	}
	auxtrace_mmap__munmap(&evlist->mmap[idx].auxtrace_mmap);
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	if (evlist->mmap == NULL)
		return;

	for (i = 0; i < evlist->nr_mmaps; i++)
		__perf_evlist__munmap(evlist, i);

	zfree(&evlist->mmap);
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

struct mmap_params {
	int prot;
	int mask;
	struct auxtrace_mmap_params auxtrace_mp;
};

static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
			       struct mmap_params *mp, int fd)
{
	/*
	 * The last one will be done at perf_evlist__mmap_consume(), so that we
	 * make sure we don't prevent tools from consuming every last event in
	 * the ring buffer.
	 *
	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
	 * anymore, but the last events for it are still in the ring buffer,
	 * waiting to be consumed.
	 *
	 * Tools can choose to ignore this at their own discretion, but the
	 * evlist layer can't just drop it when filtering events in
	 * perf_evlist__filter_pollfd().
	 */
	atomic_set(&evlist->mmap[idx].refcnt, 2);
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mp->mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, mp->prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	if (auxtrace_mmap__mmap(&evlist->mmap[idx].auxtrace_mmap,
				&mp->auxtrace_mp, evlist->mmap[idx].base, fd))
		return -1;

	return 0;
}

static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
				       struct mmap_params *mp, int cpu,
				       int thread, int *output)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		int fd;

		if (evsel->system_wide && thread)
			continue;

		fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;
900
			if (__perf_evlist__mmap(evlist, idx, mp, *output) < 0)
901 902 903 904
				return -1;
		} else {
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;
905 906

			perf_evlist__mmap_get(evlist, idx);
907 908
		}

909 910 911 912 913 914 915 916 917
		/*
		 * The system_wide flag causes a selected event to be opened
		 * always without a pid.  Consequently it will never get a
		 * POLLHUP, but it is used for tracking in combination with
		 * other events, so it should not need to be polled anyway.
		 * Therefore don't add it for polling.
		 */
		if (!evsel->system_wide &&
		    __perf_evlist__add_pollfd(evlist, fd, idx) < 0) {
			perf_evlist__mmap_put(evlist, idx);
			return -1;
		}

		if (evsel->attr.read_format & PERF_FORMAT_ID) {
			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
						   fd) < 0)
				return -1;
			perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
						 thread);
		}
	}

	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
				     struct mmap_params *mp)
{
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;

		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
					      true);

		for (thread = 0; thread < nr_threads; thread++) {
			if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
							thread, &output))
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < nr_cpus; cpu++)
		__perf_evlist__munmap(evlist, cpu);
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
					struct mmap_params *mp)
{
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;

		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
					      false);

		if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
						&output))
			goto out_unmap;
	}

	return 0;

out_unmap:
	for (thread = 0; thread < nr_threads; thread++)
		__perf_evlist__munmap(evlist, thread);
	return -1;
}

static size_t perf_evlist__mmap_size(unsigned long pages)
{
	if (pages == UINT_MAX) {
		int max;

		if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
			/*
			 * Pick a once upon a time good value, i.e. things look
			 * strange since we can't read a sysctl value, but let's not
			 * die yet...
			 */
			max = 512;
		} else {
			max -= (page_size / 1024);
		}

		pages = (max * 1024) / page_size;
		if (!is_power_of_2(pages))
			pages = rounddown_pow_of_two(pages);
	} else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}

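/*
 * Accept either a plain page count or a size with a B/K/M/G suffix
 * (e.g. "128" or "512K"); sizes are converted to pages and, when
 * necessary, rounded up to the next power of two.
 */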
static long parse_pages_arg(const char *str, unsigned long min,
			    unsigned long max)
{
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag  = 'B', .mult = 1       },
		{ .tag  = 'K', .mult = 1 << 10 },
		{ .tag  = 'M', .mult = 1 << 20 },
		{ .tag  = 'G', .mult = 1 << 30 },
		{ .tag  = 0 },
	};

	if (str == NULL)
		return -EINVAL;

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
	} else {
		/* we got pages count value */
		char *eptr;
		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0')
			return -EINVAL;
	}

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		/* round pages up to next power of 2 */
		pages = roundup_pow_of_two(pages);
		if (!pages)
			return -EINVAL;
		pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n",
			pages * page_size, pages);
	}

	if (pages > max)
		return -EINVAL;

	return pages;
}

int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
{
	unsigned long max = UINT_MAX;
	long pages;

	if (max > SIZE_MAX / page_size)
		max = SIZE_MAX / page_size;

	pages = parse_pages_arg(str, 1, max);
	if (pages < 0) {
		pr_err("Invalid argument for --mmap_pages/-m\n");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}

int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
				  int unset __maybe_unused)
{
	return __perf_evlist__parse_mmap_pages(opt->value, str);
}

/**
 * perf_evlist__mmap_ex - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 * @auxtrace_pages: auxtrace map length in pages
 * @auxtrace_overwrite: overwrite older auxtrace data?
 *
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail().  Using perf_evlist__mmap_read() does this
 * automatically.
 *
 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
 * consumption using auxtrace_mmap__write_tail().
 *
 * Return: %0 on success, negative error code otherwise.
 */
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
			 bool overwrite, unsigned int auxtrace_pages,
			 bool auxtrace_overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	struct mmap_params mp = {
		.prot = PROT_READ | (overwrite ? 0 : PROT_WRITE),
	};

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = perf_evlist__mmap_size(pages);
	pr_debug("mmap size %zuB\n", evlist->mmap_len);
	mp.mask = evlist->mmap_len - page_size - 1;

	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->mmap_len,
				   auxtrace_pages, auxtrace_overwrite);

	evlist__for_each(evlist, evsel) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, &mp);

	return perf_evlist__mmap_per_cpu(evlist, &mp);
}

int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false);
}

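/*
 * A minimal read-loop sketch built from the APIs above, assuming the
 * evlist has already been created, opened and mmapped and that map
 * index 0 is valid (error handling elided):
 *
 *	union perf_event *event;
 *
 *	while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
 *		... process the event ...
 *		perf_evlist__mmap_consume(evlist, 0);
 *	}
 */
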
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
{
	struct cpu_map *cpus;
	struct thread_map *threads;

	threads = thread_map__new_str(target->pid, target->tid, target->uid);

	if (!threads)
		return -1;

	if (target__uses_dummy_map(target))
		cpus = cpu_map__dummy_new();
	else
		cpus = cpu_map__new(target->cpu_list);

	if (!cpus)
		goto out_delete_threads;

	evlist->has_user_cpus = !!target->cpu_list;

	perf_evlist__set_maps(evlist, cpus, threads);

	return 0;

out_delete_threads:
	thread_map__put(threads);
	return -1;
}

void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
			   struct thread_map *threads)
{
	/*
	 * Allow for the possibility that one or another of the maps isn't being
	 * changed i.e. don't put it.  Note we are assuming the maps that are
	 * being applied are brand new and evlist is taking ownership of the
	 * original reference count of 1.  If that is not the case it is up to
	 * the caller to increase the reference count.
	 */
	if (cpus != evlist->cpus) {
		cpu_map__put(evlist->cpus);
		evlist->cpus = cpu_map__get(cpus);
	}

	if (threads != evlist->threads) {
		thread_map__put(evlist->threads);
		evlist->threads = thread_map__get(threads);
	}

	perf_evlist__propagate_maps(evlist);
}

int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	evlist__for_each(evlist, evsel) {
		if (evsel->filter == NULL)
			continue;

		/*
		 * Filters only work for tracepoint events, which don't have a
		 * cpu limit, so the evlist and evsel maps should always match.
		 */
		err = perf_evsel__apply_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err) {
			*err_evsel = evsel;
			break;
		}
	}

	return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;

	evlist__for_each(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		err = perf_evsel__set_filter(evsel, filter);
		if (err)
			break;
	}

	return err;
}

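/*
 * Build a tracepoint filter that excludes the given pids; e.g. for
 * pids {1, 2} the resulting filter string is:
 *
 *	"common_pid != 1 && common_pid != 2"
 */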
int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter;
	int ret = -1;
	size_t i;

	for (i = 0; i < npids; ++i) {
		if (i == 0) {
			if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
				return -1;
		} else {
			char *tmp;

			if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
				goto out_free;

			free(filter);
			filter = tmp;
		}
	}

	ret = perf_evlist__set_filter(evlist, filter);
out_free:
	free(filter);
	return ret;
}

int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid)
{
	return perf_evlist__set_filter_pids(evlist, 1, &pid);
}

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	if (evlist->nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	evlist__for_each(evlist, pos) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}

u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	evlist__for_each(evlist, evsel)
		evlist->combined_sample_type |= evsel->attr.sample_type;

	return evlist->combined_sample_type;
}

u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __perf_evlist__combined_sample_type(evlist);
}

u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	u64 branch_type = 0;

	evlist__for_each(evlist, evsel)
		branch_type |= evsel->attr.branch_sample_type;
	return branch_type;
}

bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	evlist__for_each(evlist, pos) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.read_format;
}

u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}

bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	evlist__for_each_continue(evlist, pos) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int ncpus = cpu_map__nr(evlist->cpus);
	int nthreads = thread_map__nr(evlist->threads);
	int n;

	evlist__for_each_reverse(evlist, evsel) {
		n = evsel->cpus ? evsel->cpus->nr : ncpus;
		perf_evsel__close(evsel, n, nthreads);
	}
}

static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
{
	struct cpu_map	  *cpus;
	struct thread_map *threads;
	int err = -ENOMEM;

	/*
	 * Try reading /sys/devices/system/cpu/online to get
	 * an all cpus map.
	 *
	 * FIXME: -ENOMEM is the best we can do here, the cpu_map
	 * code needs an overhaul to properly forward the
	 * error, and we may not want to do that fallback to a
	 * default cpu identity map :-\
	 */
	cpus = cpu_map__new(NULL);
	if (!cpus)
		goto out;

	threads = thread_map__new_dummy();
	if (!threads)
		goto out_put;

	perf_evlist__set_maps(evlist, cpus, threads);

	err = 0;
out:
	return err;
out_put:
	cpu_map__put(cpus);
	goto out;
}

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	/*
	 * Default: one fd per CPU, all threads, aka systemwide
	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
	 */
	if (evlist->threads == NULL && evlist->cpus == NULL) {
		err = perf_evlist__create_syswide_maps(evlist);
		if (err < 0)
			goto out_err;
	}

	perf_evlist__update_id_pos(evlist);

	evlist__for_each(evlist, evsel) {
		err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	perf_evlist__close(evlist);
	errno = -err;
	return err;
}

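/*
 * Fork the workload but leave it corked on a pipe until
 * perf_evlist__start_workload() is called.  A typical pairing, sketched
 * with error handling elided:
 *
 *	perf_evlist__prepare_workload(evlist, &target, argv, false, NULL);
 *	perf_evlist__open(evlist);
 *	perf_evlist__start_workload(evlist);
 */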
int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
				  const char *argv[], bool pipe_output,
				  void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		int ret;

		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		ret = read(go_pipe[0], &bf, 1);
		/*
		 * The parent will ask for the execvp() to be performed by
		 * writing exactly one byte, in workload.cork_fd, usually via
		 * perf_evlist__start_workload().
		 *
		 * For cancelling the workload without actually running it,
		 * the parent will just close workload.cork_fd, without writing
		 * anything, i.e. read will return zero and we just exit()
		 * here.
		 */
		if (ret != 1) {
			if (ret == -1)
				perror("unable to read pipe");
			exit(ret);
		}

		execvp(argv[0], (char **)argv);

		if (exec_error) {
			union sigval val;

			val.sival_int = errno;
			if (sigqueue(getppid(), SIGUSR1, val))
				perror(argv[0]);
		} else
			perror(argv[0]);
		exit(-1);
	}

	if (exec_error) {
		struct sigaction act = {
			.sa_flags     = SA_SIGINFO,
			.sa_sigaction = exec_error,
		};
		sigaction(SIGUSR1, &act, NULL);
	}

	if (target__none(target)) {
		if (evlist->threads == NULL) {
			fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n",
				__func__, __LINE__);
			goto out_close_pipes;
		}
		thread_map__set_pid(evlist->threads, 0, evlist->workload.pid);
	}

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("enable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	evlist__for_each(evlist, evsel) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}

int perf_evlist__strerror_open(struct perf_evlist *evlist,
			       int err, char *buf, size_t size)
{
	int printed, value;
	char sbuf[STRERR_BUFSIZE], *emsg = strerror_r(err, sbuf, sizeof(sbuf));

	switch (err) {
	case EACCES:
	case EPERM:
		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);

1643
		value = perf_event_paranoid();
1644 1645 1646 1647 1648 1649 1650 1651

		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");

		if (value >= 2) {
			printed += scnprintf(buf + printed, size - printed,
					     "For your workloads it needs to be <= 1\nHint:\t");
		}
		printed += scnprintf(buf + printed, size - printed,
				     "For system wide tracing it needs to be set to -1.\n");

		printed += scnprintf(buf + printed, size - printed,
				    "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
				    "Hint:\tThe current value is %d.", value);
		break;
	case EINVAL: {
		struct perf_evsel *first = perf_evlist__first(evlist);
		int max_freq;

		if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
			goto out_default;

		if (first->attr.sample_freq < (u64)max_freq)
			goto out_default;

		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
				    "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
				    emsg, max_freq, first->attr.sample_freq);
		break;
	}
	default:
out_default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}

int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE], *emsg = strerror_r(err, sbuf, sizeof(sbuf));
	int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user, printed = 0;

	switch (err) {
	case EPERM:
		sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
		printed += scnprintf(buf + printed, size - printed,
				     "Error:\t%s.\n"
				     "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
				     "Hint:\tTried using %zd kB.\n",
				     emsg, pages_max_per_user, pages_attempted);

		if (pages_attempted >= pages_max_per_user) {
			printed += scnprintf(buf + printed, size - printed,
					     "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
					     pages_max_per_user + pages_attempted);
		}

		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tTry using a smaller -m/--mmap-pages value.");
		break;
	default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}

void perf_evlist__to_front(struct perf_evlist *evlist,
			   struct perf_evsel *move_evsel)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(move);

	if (move_evsel == perf_evlist__first(evlist))
		return;

	evlist__for_each_safe(evlist, n, evsel) {
		if (evsel->leader == move_evsel->leader)
			list_move_tail(&evsel->node, &move);
	}

	list_splice(&move, &evlist->entries);
}

void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
				     struct perf_evsel *tracking_evsel)
{
	struct perf_evsel *evsel;

	if (tracking_evsel->tracking)
		return;

	evlist__for_each(evlist, evsel) {
		if (evsel != tracking_evsel)
			evsel->tracking = false;
	}

	tracking_evsel->tracking = true;
}

struct perf_evsel *
perf_evlist__find_evsel_by_str(struct perf_evlist *evlist,
			       const char *str)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		if (!evsel->name)
			continue;
		if (strcmp(str, evsel->name) == 0)
			return evsel;
	}

	return NULL;
}