// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */
#include <api/fs/fs.h>
#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include "cpumap.h"
#include "util/mmap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include "units.h"
#include "bpf_counter.h"
#include <internal/lib.h> // page_size
#include "affinity.h"
#include "../perf.h"
#include "asm/bug.h"
#include "bpf-event.h"
#include "util/string2.h"
#include "util/perf_api_probe.h"
#include "util/evsel_fprintf.h"
#include "util/evlist-hybrid.h"
#include "util/pmu.h"
#include <signal.h>
#include <unistd.h>
#include <sched.h>
#include <stdlib.h>

#include "parse-events.h"
#include <subcmd/parse-options.h>

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/log2.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>
#include <perf/mmap.h>

#include <internal/xyarray.h>

#ifdef LACKS_SIGQUEUE_PROTOTYPE
int sigqueue(pid_t pid, int sig, const union sigval value);
#endif

#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)

void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
		  struct perf_thread_map *threads)
{
	perf_evlist__init(&evlist->core);
	perf_evlist__set_maps(&evlist->core, cpus, threads);
	evlist->workload.pid = -1;
	evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
	evlist->ctl_fd.fd = -1;
	evlist->ctl_fd.ack = -1;
	evlist->ctl_fd.pos = -1;
}

struct evlist *evlist__new(void)
{
	struct evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		evlist__init(evlist, NULL, NULL);

	return evlist;
}

struct evlist *evlist__new_default(void)
{
	struct evlist *evlist = evlist__new();

	if (evlist && evlist__add_default(evlist)) {
		evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

struct evlist *evlist__new_dummy(void)
{
	struct evlist *evlist = evlist__new();

	if (evlist && evlist__add_dummy(evlist)) {
		evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}
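
/*
 * A rough sketch of the usual evlist lifecycle, assuming event parsing via
 * parse_events() from util/parse-events.h and a populated struct target
 * (error handling omitted):
 *
 *	struct evlist *evlist = evlist__new();
 *
 *	parse_events(evlist, "cycles", NULL);
 *	evlist__create_maps(evlist, &target);
 *	evlist__open(evlist);
 *	evlist__mmap(evlist, UINT_MAX);
 *	evlist__enable(evlist);
 *	// ...read events from the mmap'ed ring buffers...
 *	evlist__disable(evlist);
 *	evlist__delete(evlist);	// munmaps, closes and frees everything
 */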

/**
 * evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos.  For convenience, put a copy on evlist.
 */
void evlist__set_id_pos(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void evlist__update_id_pos(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__calc_id_pos(evsel);

	evlist__set_id_pos(evlist);
}

static void evlist__purge(struct evlist *evlist)
{
	struct evsel *pos, *n;

	evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->core.node);
		pos->evlist = NULL;
		evsel__delete(pos);
	}

	evlist->core.nr_entries = 0;
}

void evlist__exit(struct evlist *evlist)
{
	zfree(&evlist->mmap);
	zfree(&evlist->overwrite_mmap);
	perf_evlist__exit(&evlist->core);
}

void evlist__delete(struct evlist *evlist)
{
	if (evlist == NULL)
		return;

	evlist__munmap(evlist);
	evlist__close(evlist);
	evlist__purge(evlist);
	evlist__exit(evlist);
	free(evlist);
}

void evlist__add(struct evlist *evlist, struct evsel *entry)
{
	perf_evlist__add(&evlist->core, &entry->core);
	entry->evlist = evlist;
	entry->tracking = !entry->core.idx;

	if (evlist->core.nr_entries == 1)
		evlist__set_id_pos(evlist);
}

void evlist__remove(struct evlist *evlist, struct evsel *evsel)
{
	evsel->evlist = NULL;
	perf_evlist__remove(&evlist->core, &evsel->core);
}

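/*
 * Move the evsels on @list to the tail of @evlist one group at a time: each
 * pass of the outer loop first detaches the head of @list (a group leader)
 * and then collects the remaining members of its group.
 */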
void evlist__splice_list_tail(struct evlist *evlist, struct list_head *list)
{
	while (!list_empty(list)) {
		struct evsel *evsel, *temp, *leader = NULL;

		__evlist__for_each_entry_safe(list, temp, evsel) {
			list_del_init(&evsel->core.node);
			evlist__add(evlist, evsel);
			leader = evsel;
			break;
		}

		__evlist__for_each_entry_safe(list, temp, evsel) {
			if (evsel__has_leader(evsel, leader)) {
				list_del_init(&evsel->core.node);
				evlist__add(evlist, evsel);
			}
		}
	}
}

204 205 206 207 208 209 210 211
int __evlist__set_tracepoints_handlers(struct evlist *evlist,
				       const struct evsel_str_handler *assocs, size_t nr_assocs)
{
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		// Adding a handler for an event not in this evlist, just ignore it.
212
		struct evsel *evsel = evlist__find_tracepoint_by_name(evlist, assocs[i].name);
213 214 215 216 217 218 219 220 221 222 223 224 225 226
		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler != NULL)
			goto out;
		evsel->handler = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}

227
void evlist__set_leader(struct evlist *evlist)
228
{
229
	perf_evlist__set_leader(&evlist->core);
230 231
}

232
int __evlist__add_default(struct evlist *evlist, bool precise)
233
{
234
	struct evsel *evsel;
235

236 237
	evsel = evsel__new_cycles(precise, PERF_TYPE_HARDWARE,
				  PERF_COUNT_HW_CPU_CYCLES);
238
	if (evsel == NULL)
239
		return -ENOMEM;
240

241
	evlist__add(evlist, evsel);
242 243
	return 0;
}
244

245
int evlist__add_dummy(struct evlist *evlist)
246 247 248 249 250 251
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_DUMMY,
		.size	= sizeof(attr), /* to capture ABI version */
	};
252
	struct evsel *evsel = evsel__new_idx(&attr, evlist->core.nr_entries);
253 254 255 256

	if (evsel == NULL)
		return -ENOMEM;

257
	evlist__add(evlist, evsel);
258 259 260
	return 0;
}

261
static int evlist__add_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
262
{
263
	struct evsel *evsel, *n;
264 265 266 267
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
268
		evsel = evsel__new_idx(attrs + i, evlist->core.nr_entries + i);
269 270
		if (evsel == NULL)
			goto out_delete_partial_list;
271
		list_add_tail(&evsel->core.node, &head);
272 273
	}

274
	evlist__splice_list_tail(evlist, &head);
275 276 277 278

	return 0;

out_delete_partial_list:
279
	__evlist__for_each_entry_safe(&head, n, evsel)
280
		evsel__delete(evsel);
281 282 283
	return -1;
}

284
int __evlist__add_default_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
285 286 287 288 289 290
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

291
	return evlist__add_attrs(evlist, attrs, nr_attrs);
292 293
}

294 295 296 297 298
__weak int arch_evlist__add_default_attrs(struct evlist *evlist __maybe_unused)
{
	return 0;
}

299
struct evsel *evlist__find_tracepoint_by_id(struct evlist *evlist, int id)
300
{
301
	struct evsel *evsel;
302

303
	evlist__for_each_entry(evlist, evsel) {
304 305
		if (evsel->core.attr.type   == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->core.attr.config == id)
306 307 308 309 310 311
			return evsel;
	}

	return NULL;
}

312
struct evsel *evlist__find_tracepoint_by_name(struct evlist *evlist, const char *name)
313
{
314
	struct evsel *evsel;
315

316
	evlist__for_each_entry(evlist, evsel) {
317
		if ((evsel->core.attr.type == PERF_TYPE_TRACEPOINT) &&
318 319 320 321 322 323 324
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

325
int evlist__add_newtp(struct evlist *evlist, const char *sys, const char *name, void *handler)
326
{
327
	struct evsel *evsel = evsel__newtp(sys, name);
328

329
	if (IS_ERR(evsel))
330 331
		return -1;

332
	evsel->handler = handler;
333
	evlist__add(evlist, evsel);
334 335 336
	return 0;
}

337 338 339 340
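/*
 * The evlist_cpu_iterator below visits each (CPU, evsel) pair CPU-major, so
 * that all per-CPU work can be done while the thread's affinity is set to
 * that CPU just once.  A minimal usage sketch, equivalent to what the
 * evlist__for_each_cpu() macro in evlist.h expands to:
 *
 *	struct evlist_cpu_iterator itr;
 *
 *	for (itr = evlist__cpu_begin(evlist, affinity);
 *	     !evlist_cpu_iterator__end(&itr);
 *	     evlist_cpu_iterator__next(&itr)) {
 *		// operate on itr.evsel at CPU map index itr.cpu_map_idx
 *	}
 */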
struct evlist_cpu_iterator evlist__cpu_begin(struct evlist *evlist, struct affinity *affinity)
{
	struct evlist_cpu_iterator itr = {
		.container = evlist,
		.evsel = NULL,
		.cpu_map_idx = 0,
		.evlist_cpu_map_idx = 0,
		.evlist_cpu_map_nr = perf_cpu_map__nr(evlist->core.all_cpus),
		.cpu = (struct perf_cpu){ .cpu = -1},
		.affinity = affinity,
	};

	if (evlist__empty(evlist)) {
		/* Ensure the empty list doesn't iterate. */
		itr.evlist_cpu_map_idx = itr.evlist_cpu_map_nr;
	} else {
		itr.evsel = evlist__first(evlist);
		if (itr.affinity) {
			itr.cpu = perf_cpu_map__cpu(evlist->core.all_cpus, 0);
			affinity__set(itr.affinity, itr.cpu.cpu);
			itr.cpu_map_idx = perf_cpu_map__idx(itr.evsel->core.cpus, itr.cpu);
			/*
			 * If this CPU isn't in the evsel's cpu map then advance
			 * through the list.
			 */
			if (itr.cpu_map_idx == -1)
				evlist_cpu_iterator__next(&itr);
		}
	}
	return itr;
}

void evlist_cpu_iterator__next(struct evlist_cpu_iterator *evlist_cpu_itr)
{
	while (evlist_cpu_itr->evsel != evlist__last(evlist_cpu_itr->container)) {
		evlist_cpu_itr->evsel = evsel__next(evlist_cpu_itr->evsel);
		evlist_cpu_itr->cpu_map_idx =
			perf_cpu_map__idx(evlist_cpu_itr->evsel->core.cpus,
					  evlist_cpu_itr->cpu);
		if (evlist_cpu_itr->cpu_map_idx != -1)
			return;
	}
	evlist_cpu_itr->evlist_cpu_map_idx++;
	if (evlist_cpu_itr->evlist_cpu_map_idx < evlist_cpu_itr->evlist_cpu_map_nr) {
		evlist_cpu_itr->evsel = evlist__first(evlist_cpu_itr->container);
		evlist_cpu_itr->cpu =
			perf_cpu_map__cpu(evlist_cpu_itr->container->core.all_cpus,
					  evlist_cpu_itr->evlist_cpu_map_idx);
		if (evlist_cpu_itr->affinity)
			affinity__set(evlist_cpu_itr->affinity, evlist_cpu_itr->cpu.cpu);
		evlist_cpu_itr->cpu_map_idx =
			perf_cpu_map__idx(evlist_cpu_itr->evsel->core.cpus,
					  evlist_cpu_itr->cpu);
		/*
		 * If this CPU isn't in the evsel's cpu map then advance through
		 * the list.
		 */
		if (evlist_cpu_itr->cpu_map_idx == -1)
			evlist_cpu_iterator__next(evlist_cpu_itr);
	}
}

bool evlist_cpu_iterator__end(const struct evlist_cpu_iterator *evlist_cpu_itr)
{
	return evlist_cpu_itr->evlist_cpu_map_idx >= evlist_cpu_itr->evlist_cpu_map_nr;
}

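/*
 * Note the inverted convention: 0 means "match".  A NULL @evsel_name matches
 * every event, while dummy events never match by name.
 */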
static int evsel__strcmp(struct evsel *pos, char *evsel_name)
{
	if (!evsel_name)
		return 0;
	if (evsel__is_dummy_event(pos))
		return 1;
	return strcmp(pos->name, evsel_name);
}

static bool evlist__is_enabled(struct evlist *evlist)
{
	struct evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		/* If at least one event is enabled, evlist is enabled. */
		if (!pos->disabled)
			return true;
	}
	return false;
}

static void __evlist__disable(struct evlist *evlist, char *evsel_name)
{
	struct evsel *pos;
	struct evlist_cpu_iterator evlist_cpu_itr;
	struct affinity saved_affinity, *affinity = NULL;
	bool has_imm = false;

	// See explanation in evlist__close()
	if (!cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
		if (affinity__setup(&saved_affinity) < 0)
			return;
		affinity = &saved_affinity;
	}

	/* Disable 'immediate' events last */
	for (int imm = 0; imm <= 1; imm++) {
		evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) {
			pos = evlist_cpu_itr.evsel;
			if (evsel__strcmp(pos, evsel_name))
				continue;
			if (pos->disabled || !evsel__is_group_leader(pos) || !pos->core.fd)
				continue;
			if (pos->immediate)
				has_imm = true;
			if (pos->immediate != imm)
				continue;
			evsel__disable_cpu(pos, evlist_cpu_itr.cpu_map_idx);
		}
		if (!has_imm)
			break;
	}

	affinity__cleanup(affinity);
	evlist__for_each_entry(evlist, pos) {
		if (evsel__strcmp(pos, evsel_name))
			continue;
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		pos->disabled = true;
	}

	/*
	 * If we disabled only a single event, we need to check the enabled
	 * state of the evlist manually.
	 */
	if (evsel_name)
		evlist->enabled = evlist__is_enabled(evlist);
	else
		evlist->enabled = false;
}

void evlist__disable(struct evlist *evlist)
{
	__evlist__disable(evlist, NULL);
}

void evlist__disable_evsel(struct evlist *evlist, char *evsel_name)
{
	__evlist__disable(evlist, evsel_name);
}

static void __evlist__enable(struct evlist *evlist, char *evsel_name)
{
	struct evsel *pos;
	struct evlist_cpu_iterator evlist_cpu_itr;
	struct affinity saved_affinity, *affinity = NULL;

	// See explanation in evlist__close()
	if (!cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
		if (affinity__setup(&saved_affinity) < 0)
			return;
		affinity = &saved_affinity;
	}

	evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) {
		pos = evlist_cpu_itr.evsel;
		if (evsel__strcmp(pos, evsel_name))
			continue;
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		evsel__enable_cpu(pos, evlist_cpu_itr.cpu_map_idx);
	}
	affinity__cleanup(affinity);
	evlist__for_each_entry(evlist, pos) {
		if (evsel__strcmp(pos, evsel_name))
			continue;
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		pos->disabled = false;
	}

	/*
	 * Even enabling a single event sets 'enabled' on the evlist, so that
	 * evlist__toggle_enable() works properly and can toggle back to the
	 * 'disabled' state.
	 */
	evlist->enabled = true;
}

void evlist__enable(struct evlist *evlist)
{
	__evlist__enable(evlist, NULL);
}

void evlist__enable_evsel(struct evlist *evlist, char *evsel_name)
{
	__evlist__enable(evlist, evsel_name);
}

536
void evlist__toggle_enable(struct evlist *evlist)
537
{
538
	(evlist->enabled ? evlist__disable : evlist__enable)(evlist);
539 540
}

541
int evlist__add_pollfd(struct evlist *evlist, int fd)
542
{
543
	return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN, fdarray_flag__default);
544 545
}

546
int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
547
{
548
	return perf_evlist__filter_pollfd(&evlist->core, revents_and_mask);
549 550
}

551 552 553 554 555 556 557 558
#ifdef HAVE_EVENTFD_SUPPORT
int evlist__add_wakeup_eventfd(struct evlist *evlist, int fd)
{
	return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN,
				       fdarray_flag__nonfilterable);
}
#endif

559
int evlist__poll(struct evlist *evlist, int timeout)
560
{
561
	return perf_evlist__poll(&evlist->core, timeout);
562 563
}

struct perf_sample_id *evlist__id2sid(struct evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->core.heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct evsel *evlist__id2evsel(struct evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->core.nr_entries == 1 || !id)
		return evlist__first(evlist);

	sid = evlist__id2sid(evlist, id);
	if (sid)
		return container_of(sid->evsel, struct evsel, core);

	if (!evlist__sample_id_all(evlist))
		return evlist__first(evlist);

	return NULL;
}

struct evsel *evlist__id2evsel_strict(struct evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (!id)
		return NULL;

	sid = evlist__id2sid(evlist, id);
	if (sid)
		return container_of(sid->evsel, struct evsel, core);

	return NULL;
}

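/*
 * Pull the sample id out of a raw event: for PERF_RECORD_SAMPLE it sits
 * id_pos u64s from the start of the sample array, for all other record types
 * it sits is_pos u64s back from the end (in the sample_id_all trailer).
 */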
static int evlist__event2id(struct evlist *evlist, union perf_event *event, u64 *id)
{
	const __u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

struct evsel *evlist__event2evsel(struct evlist *evlist, union perf_event *event)
{
	struct evsel *first = evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->core.nr_entries == 1)
		return first;

	if (!first->core.attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->core.heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return container_of(sid->evsel, struct evsel, core);
	}
	return NULL;
}

static int evlist__set_paused(struct evlist *evlist, bool value)
{
	int i;

	if (!evlist->overwrite_mmap)
		return 0;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		int fd = evlist->overwrite_mmap[i].core.fd;
		int err;

		if (fd < 0)
			continue;
		err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
		if (err)
			return err;
	}
	return 0;
}

static int evlist__pause(struct evlist *evlist)
{
	return evlist__set_paused(evlist, true);
}

static int evlist__resume(struct evlist *evlist)
{
	return evlist__set_paused(evlist, false);
}

static void evlist__munmap_nofree(struct evlist *evlist)
{
	int i;

	if (evlist->mmap)
		for (i = 0; i < evlist->core.nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i].core);

	if (evlist->overwrite_mmap)
		for (i = 0; i < evlist->core.nr_mmaps; i++)
			perf_mmap__munmap(&evlist->overwrite_mmap[i].core);
}

void evlist__munmap(struct evlist *evlist)
{
	evlist__munmap_nofree(evlist);
	zfree(&evlist->mmap);
	zfree(&evlist->overwrite_mmap);
}

static void perf_mmap__unmap_cb(struct perf_mmap *map)
{
	struct mmap *m = container_of(map, struct mmap, core);

	mmap__munmap(m);
}

static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
				       bool overwrite)
{
	int i;
	struct mmap *map;

	map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		struct perf_mmap *prev = i ? &map[i - 1].core : NULL;

		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_mmap__consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
		 * thus does perf_mmap__get() on it.
		 */
		perf_mmap__init(&map[i].core, prev, overwrite, perf_mmap__unmap_cb);
	}

	return map;
}

static void
perf_evlist__mmap_cb_idx(struct perf_evlist *_evlist,
			 struct perf_evsel *_evsel __maybe_unused,
			 struct perf_mmap_param *_mp,
			 int idx)
{
	struct evlist *evlist = container_of(_evlist, struct evlist, core);
	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);
	bool per_cpu = !perf_cpu_map__empty(_evlist->user_requested_cpus);

	auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, idx, per_cpu);
}

static struct perf_mmap*
perf_evlist__mmap_cb_get(struct perf_evlist *_evlist, bool overwrite, int idx)
{
	struct evlist *evlist = container_of(_evlist, struct evlist, core);
	struct mmap *maps;

	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;

	if (!maps) {
		maps = evlist__alloc_mmap(evlist, overwrite);
		if (!maps)
			return NULL;

		if (overwrite) {
			evlist->overwrite_mmap = maps;
			if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
				evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
		} else {
			evlist->mmap = maps;
		}
	}

	return &maps[idx].core;
}

static int
perf_evlist__mmap_cb_mmap(struct perf_mmap *_map, struct perf_mmap_param *_mp,
			  int output, struct perf_cpu cpu)
{
	struct mmap *map = container_of(_map, struct mmap, core);
	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);

	return mmap__mmap(map, mp, output, cpu);
}

unsigned long perf_event_mlock_kb_in_pages(void)
{
	unsigned long pages;
	int max;

	if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
		/*
		 * Pick a once upon a time good value, i.e. things look
		 * strange since we can't read a sysctl value, but let's not
		 * die yet...
		 */
		max = 512;
	} else {
		max -= (page_size / 1024);
	}

	pages = (max * 1024) / page_size;
	if (!is_power_of_2(pages))
		pages = rounddown_pow_of_two(pages);

	return pages;
}

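/*
 * Worked example, assuming 4KiB pages and the kernel default
 * perf_event_mlock_kb of 516: 516KB - 4KB = 512KB = 128 pages, already a
 * power of 2, so evlist__mmap_size(UINT_MAX) below returns
 * (128 + 1) * 4096 bytes: 128 data pages plus one control page for the
 * ring buffer header.
 */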
size_t evlist__mmap_size(unsigned long pages)
{
	if (pages == UINT_MAX)
		pages = perf_event_mlock_kb_in_pages();
	else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}

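/*
 * Accepts either a plain page count or a size with a B/K/M/G suffix, e.g.
 * "128" or "512K"; sizes are converted to pages and, when needed, rounded up
 * to the next power of 2 (e.g. "200K" becomes 64 pages with 4KiB pages).
 */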
static long parse_pages_arg(const char *str, unsigned long min,
			    unsigned long max)
{
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag  = 'B', .mult = 1       },
		{ .tag  = 'K', .mult = 1 << 10 },
		{ .tag  = 'M', .mult = 1 << 20 },
		{ .tag  = 'G', .mult = 1 << 30 },
		{ .tag  = 0 },
	};

	if (str == NULL)
		return -EINVAL;

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
	} else {
		/* we got pages count value */
		char *eptr;
		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0')
			return -EINVAL;
	}

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		char buf[100];

		/* round pages up to next power of 2 */
		pages = roundup_pow_of_two(pages);
		if (!pages)
			return -EINVAL;

		unit_number__scnprintf(buf, sizeof(buf), pages * page_size);
		pr_info("rounding mmap pages size to %s (%lu pages)\n",
			buf, pages);
	}

	if (pages > max)
		return -EINVAL;

	return pages;
}

int __evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
{
	unsigned long max = UINT_MAX;
	long pages;

	if (max > SIZE_MAX / page_size)
		max = SIZE_MAX / page_size;

	pages = parse_pages_arg(str, 1, max);
	if (pages < 0) {
		pr_err("Invalid argument for --mmap_pages/-m\n");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}

int evlist__parse_mmap_pages(const struct option *opt, const char *str, int unset __maybe_unused)
{
	return __evlist__parse_mmap_pages(opt->value, str);
}

/**
 * evlist__mmap_ex - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 * @auxtrace_pages: auxtrace map length in pages
 * @auxtrace_overwrite: overwrite older auxtrace data?
 *
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail().  Using evlist__mmap_read() does this
 * automatically.
 *
 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
 * consumption using auxtrace_mmap__write_tail().
 *
 * Return: %0 on success, negative error code otherwise.
 */
int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
			 unsigned int auxtrace_pages,
			 bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush,
			 int comp_level)
{
	/*
	 * Delay setting mp.prot: set it before calling perf_mmap__mmap.
	 * Its value is decided by evsel's write_backward.
	 * So &mp should not be passed through const pointer.
	 */
	struct mmap_params mp = {
		.nr_cblocks	= nr_cblocks,
		.affinity	= affinity,
		.flush		= flush,
		.comp_level	= comp_level
	};
	struct perf_evlist_mmap_ops ops = {
		.idx  = perf_evlist__mmap_cb_idx,
		.get  = perf_evlist__mmap_cb_get,
		.mmap = perf_evlist__mmap_cb_mmap,
	};

	evlist->core.mmap_len = evlist__mmap_size(pages);
	pr_debug("mmap size %zuB\n", evlist->core.mmap_len);

	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->core.mmap_len,
				   auxtrace_pages, auxtrace_overwrite);

	return perf_evlist__mmap_ops(&evlist->core, &ops, &mp.core);
}

int evlist__mmap(struct evlist *evlist, unsigned int pages)
{
	return evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0);
}

int evlist__create_maps(struct evlist *evlist, struct target *target)
{
	bool all_threads = (target->per_thread && target->system_wide);
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;

	/*
	 * If both '-a' and '--per-thread' are given to perf record, perf
	 * record overrides '--per-thread': target->per_thread = false and
	 * target->system_wide = true.
	 *
	 * If only '--per-thread' is given, target->per_thread = true and
	 * target->system_wide = false.
	 *
	 * So for perf record, target->per_thread && target->system_wide is
	 * always false and thread_map__new_str() doesn't call
	 * thread_map__new_all_cpus(), which keeps perf record's current
	 * behavior.
	 *
	 * perf stat, however, allows target->per_thread and
	 * target->system_wide to both be true, meaning system-wide
	 * per-thread data: thread_map__new_str() then calls
	 * thread_map__new_all_cpus() to enumerate all threads.
	 */
	threads = thread_map__new_str(target->pid, target->tid, target->uid,
				      all_threads);

	if (!threads)
		return -1;

	if (target__uses_dummy_map(target))
		cpus = perf_cpu_map__dummy_new();
	else
		cpus = perf_cpu_map__new(target->cpu_list);

	if (!cpus)
		goto out_delete_threads;

	evlist->core.has_user_cpus = !!target->cpu_list && !target->hybrid;

	perf_evlist__set_maps(&evlist->core, cpus, threads);

	/* as evlist now has references, put count here */
	perf_cpu_map__put(cpus);
	perf_thread_map__put(threads);

	return 0;

out_delete_threads:
	perf_thread_map__put(threads);
	return -1;
}

int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel)
{
	struct evsel *evsel;
	int err = 0;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->filter == NULL)
			continue;

		/*
		 * Filters only work for tracepoint events, which don't have a
		 * CPU limit, so the evlist and evsel CPU maps should always
		 * be the same.
		 */
		err = perf_evsel__apply_filter(&evsel->core, evsel->filter);
		if (err) {
			*err_evsel = evsel;
			break;
		}
	}

	return err;
}

int evlist__set_tp_filter(struct evlist *evlist, const char *filter)
{
	struct evsel *evsel;
	int err = 0;

	if (filter == NULL)
		return -1;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		err = evsel__set_filter(evsel, filter);
		if (err)
			break;
	}

	return err;
}

int evlist__append_tp_filter(struct evlist *evlist, const char *filter)
{
	struct evsel *evsel;
	int err = 0;

	if (filter == NULL)
		return -1;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		err = evsel__append_tp_filter(evsel, filter);
		if (err)
			break;
	}

	return err;
}

char *asprintf__tp_filter_pids(size_t npids, pid_t *pids)
{
	char *filter = NULL;	/* NULL is returned if npids == 0 */
	size_t i;

	for (i = 0; i < npids; ++i) {
		if (i == 0) {
			if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
				return NULL;
		} else {
			char *tmp;

			if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
				goto out_free;

			free(filter);
			filter = tmp;
		}
	}

	return filter;
out_free:
	free(filter);
	return NULL;
}

int evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter = asprintf__tp_filter_pids(npids, pids);
	int ret = evlist__set_tp_filter(evlist, filter);

	free(filter);
	return ret;
}

int evlist__set_tp_filter_pid(struct evlist *evlist, pid_t pid)
{
	return evlist__set_tp_filter_pids(evlist, 1, &pid);
}

int evlist__append_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter = asprintf__tp_filter_pids(npids, pids);
	int ret = evlist__append_tp_filter(evlist, filter);

	free(filter);
	return ret;
}

int evlist__append_tp_filter_pid(struct evlist *evlist, pid_t pid)
{
	return evlist__append_tp_filter_pids(evlist, 1, &pid);
}
1123
bool evlist__valid_sample_type(struct evlist *evlist)
1124
{
1125
	struct evsel *pos;
1126

1127
	if (evlist->core.nr_entries == 1)
1128 1129 1130 1131 1132
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

1133
	evlist__for_each_entry(evlist, pos) {
1134 1135
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
1136
			return false;
1137 1138
	}

1139
	return true;
1140 1141
}

1142
u64 __evlist__combined_sample_type(struct evlist *evlist)
1143
{
1144
	struct evsel *evsel;
1145 1146 1147 1148

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

1149
	evlist__for_each_entry(evlist, evsel)
1150
		evlist->combined_sample_type |= evsel->core.attr.sample_type;
1151 1152 1153 1154

	return evlist->combined_sample_type;
}

1155
u64 evlist__combined_sample_type(struct evlist *evlist)
1156 1157
{
	evlist->combined_sample_type = 0;
1158
	return __evlist__combined_sample_type(evlist);
1159 1160
}

1161
u64 evlist__combined_branch_type(struct evlist *evlist)
1162
{
1163
	struct evsel *evsel;
1164 1165
	u64 branch_type = 0;

1166
	evlist__for_each_entry(evlist, evsel)
1167
		branch_type |= evsel->core.attr.branch_sample_type;
1168 1169 1170
	return branch_type;
}

1171
bool evlist__valid_read_format(struct evlist *evlist)
1172
{
1173
	struct evsel *first = evlist__first(evlist), *pos = first;
1174 1175
	u64 read_format = first->core.attr.read_format;
	u64 sample_type = first->core.attr.sample_type;
1176

1177
	evlist__for_each_entry(evlist, pos) {
1178 1179 1180 1181
		if (read_format != pos->core.attr.read_format) {
			pr_debug("Read format differs %#" PRIx64 " vs %#" PRIx64 "\n",
				 read_format, (u64)pos->core.attr.read_format);
		}
1182 1183
	}

1184
	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
1185 1186 1187 1188 1189 1190 1191 1192
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

1193
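/*
 * Size of the sample_id trailer the kernel appends to every non-sample
 * record when attr.sample_id_all is set, e.g. for
 * PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_IDENTIFIER this is
 * 2 * 4 + 8 + 8 = 24 bytes.
 */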
u16 evlist__id_hdr_size(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->core.attr.sample_id_all)
		goto out;

	sample_type = first->core.attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}

bool evlist__valid_sample_id_all(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist), *pos = first;

	evlist__for_each_entry_continue(evlist, pos) {
		if (first->core.attr.sample_id_all != pos->core.attr.sample_id_all)
			return false;
	}

	return true;
}

bool evlist__sample_id_all(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);
	return first->core.attr.sample_id_all;
}

void evlist__set_selected(struct evlist *evlist, struct evsel *evsel)
{
	evlist->selected = evsel;
}

void evlist__close(struct evlist *evlist)
{
	struct evsel *evsel;
	struct evlist_cpu_iterator evlist_cpu_itr;
	struct affinity affinity;

	/*
	 * With perf record core.user_requested_cpus is usually NULL.
	 * Use the old method to handle this for now.
	 */
	if (!evlist->core.user_requested_cpus ||
	    cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
		evlist__for_each_entry_reverse(evlist, evsel)
			evsel__close(evsel);
		return;
	}

	if (affinity__setup(&affinity) < 0)
		return;

	evlist__for_each_cpu(evlist_cpu_itr, evlist, &affinity) {
		perf_evsel__close_cpu(&evlist_cpu_itr.evsel->core,
				      evlist_cpu_itr.cpu_map_idx);
	}

	affinity__cleanup(&affinity);
	evlist__for_each_entry_reverse(evlist, evsel) {
		perf_evsel__free_fd(&evsel->core);
		perf_evsel__free_id(&evsel->core);
	}
	perf_evlist__reset_id_hash(&evlist->core);
}

static int evlist__create_syswide_maps(struct evlist *evlist)
{
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;
	int err = -ENOMEM;

	/*
	 * Try reading /sys/devices/system/cpu/online to get
	 * an all cpus map.
	 *
	 * FIXME: -ENOMEM is the best we can do here, the cpu_map
	 * code needs an overhaul to properly forward the
	 * error, and we may not want to do that fallback to a
	 * default cpu identity map :-\
	 */
	cpus = perf_cpu_map__new(NULL);
	if (!cpus)
		goto out;

	threads = perf_thread_map__new_dummy();
	if (!threads)
		goto out_put;

	perf_evlist__set_maps(&evlist->core, cpus, threads);
	err = 0;

	perf_thread_map__put(threads);
out_put:
	perf_cpu_map__put(cpus);
out:
	return err;
}

1313
int evlist__open(struct evlist *evlist)
1314
{
1315
	struct evsel *evsel;
1316
	int err;
1317

1318 1319 1320 1321
	/*
	 * Default: one fd per CPU, all threads, aka systemwide
	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
	 */
1322
	if (evlist->core.threads == NULL && evlist->core.user_requested_cpus == NULL) {
1323
		err = evlist__create_syswide_maps(evlist);
1324 1325 1326 1327
		if (err < 0)
			goto out_err;
	}

1328
	evlist__update_id_pos(evlist);
1329

1330
	evlist__for_each_entry(evlist, evsel) {
1331
		err = evsel__open(evsel, evsel->core.cpus, evsel->core.threads);
1332 1333 1334 1335 1336 1337
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
1338
	evlist__close(evlist);
1339
	errno = -err;
1340 1341
	return err;
}
1342

1343 1344
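/*
 * Fork the workload but keep it "corked": the child signals readiness by
 * closing its end of child_ready_pipe, then blocks in read(2) on go_pipe
 * until evlist__start_workload() writes one byte to workload.cork_fd, and
 * only then execs argv.  This lets the caller open and enable all events on
 * the child's pid before the workload has run a single instruction.
 */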
int evlist__prepare_workload(struct evlist *evlist, struct target *target, const char *argv[],
			     bool pipe_output, void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		int ret;

		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Change the name of this process so as not to confuse
		 * --exclude-perf users who would otherwise see 'perf' in the
		 * window up to the execvp() and think that perf samples are
		 * not being excluded.
		 */
		prctl(PR_SET_NAME, "perf-exec");

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		ret = read(go_pipe[0], &bf, 1);
		/*
		 * The parent will ask for the execvp() to be performed by
		 * writing exactly one byte, in workload.cork_fd, usually via
		 * evlist__start_workload().
		 *
		 * For cancelling the workload without actually running it,
		 * the parent will just close workload.cork_fd, without writing
		 * anything, i.e. read will return zero and we just exit()
		 * here.
		 */
		if (ret != 1) {
			if (ret == -1)
				perror("unable to read pipe");
			exit(ret);
		}

		execvp(argv[0], (char **)argv);

		if (exec_error) {
			union sigval val;

			val.sival_int = errno;
			if (sigqueue(getppid(), SIGUSR1, val))
				perror(argv[0]);
		} else
			perror(argv[0]);
		exit(-1);
	}

	if (exec_error) {
		struct sigaction act = {
			.sa_flags     = SA_SIGINFO,
			.sa_sigaction = exec_error,
		};
		sigaction(SIGUSR1, &act, NULL);
	}

	if (target__none(target)) {
		if (evlist->core.threads == NULL) {
			fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n",
				__func__, __LINE__);
			goto out_close_pipes;
		}
		perf_thread_map__set_pid(evlist->core.threads, 0, evlist->workload.pid);
	}

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int evlist__start_workload(struct evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}

int evlist__parse_sample(struct evlist *evlist, union perf_event *event, struct perf_sample *sample)
{
	struct evsel *evsel = evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return evsel__parse_sample(evsel, event, sample);
}

int evlist__parse_sample_timestamp(struct evlist *evlist, union perf_event *event, u64 *timestamp)
{
	struct evsel *evsel = evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return evsel__parse_sample_timestamp(evsel, event, timestamp);
}

int evlist__strerror_open(struct evlist *evlist, int err, char *buf, size_t size)
{
	int printed, value;
	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));

	switch (err) {
	case EACCES:
	case EPERM:
		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);

		value = perf_event_paranoid();

		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");

		if (value >= 2) {
			printed += scnprintf(buf + printed, size - printed,
					     "For your workloads it needs to be <= 1\nHint:\t");
		}
		printed += scnprintf(buf + printed, size - printed,
				     "For system wide tracing it needs to be set to -1.\n");

		printed += scnprintf(buf + printed, size - printed,
				    "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
				    "Hint:\tThe current value is %d.", value);
		break;
	case EINVAL: {
		struct evsel *first = evlist__first(evlist);
		int max_freq;

		if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
			goto out_default;

		if (first->core.attr.sample_freq < (u64)max_freq)
			goto out_default;

		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
				    "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
				    emsg, max_freq, first->core.attr.sample_freq);
		break;
	}
	default:
out_default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}

int evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
	int pages_attempted = evlist->core.mmap_len / 1024, pages_max_per_user, printed = 0;

	switch (err) {
	case EPERM:
		sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
		printed += scnprintf(buf + printed, size - printed,
				     "Error:\t%s.\n"
				     "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
				     "Hint:\tTried using %zd kB.\n",
				     emsg, pages_max_per_user, pages_attempted);

		if (pages_attempted >= pages_max_per_user) {
			printed += scnprintf(buf + printed, size - printed,
					     "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
					     pages_max_per_user + pages_attempted);
		}

		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tTry using a smaller -m/--mmap-pages value.");
		break;
	default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}

void evlist__to_front(struct evlist *evlist, struct evsel *move_evsel)
{
	struct evsel *evsel, *n;
	LIST_HEAD(move);

	if (move_evsel == evlist__first(evlist))
		return;

	evlist__for_each_entry_safe(evlist, n, evsel) {
		if (evsel__leader(evsel) == evsel__leader(move_evsel))
			list_move_tail(&evsel->core.node, &move);
	}

	list_splice(&move, &evlist->core.entries);
}

struct evsel *evlist__get_tracking_event(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->tracking)
			return evsel;
	}

	return evlist__first(evlist);
}

void evlist__set_tracking_event(struct evlist *evlist, struct evsel *tracking_evsel)
{
	struct evsel *evsel;

	if (tracking_evsel->tracking)
		return;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel != tracking_evsel)
			evsel->tracking = false;
	}

	tracking_evsel->tracking = true;
}

struct evsel *evlist__find_evsel_by_str(struct evlist *evlist, const char *str)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (!evsel->name)
			continue;
		if (strcmp(str, evsel->name) == 0)
			return evsel;
	}

	return NULL;
}

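/*
 * Valid transitions of the overwrite (backward) ring buffer state machine
 * driven below:
 *
 *	NOTREADY -> RUNNING		(ring buffers created)
 *	RUNNING -> DATA_PENDING		(pauses the ring buffers)
 *	DATA_PENDING -> EMPTY		(data has been consumed)
 *	EMPTY -> RUNNING		(resumes the ring buffers)
 *
 * Transitions to any other target state jump to state_err and leave the
 * state unchanged.
 */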
void evlist__toggle_bkw_mmap(struct evlist *evlist, enum bkw_mmap_state state)
{
	enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
	enum action {
		NONE,
		PAUSE,
		RESUME,
	} action = NONE;

	if (!evlist->overwrite_mmap)
		return;

	switch (old_state) {
	case BKW_MMAP_NOTREADY: {
		if (state != BKW_MMAP_RUNNING)
			goto state_err;
		break;
	}
	case BKW_MMAP_RUNNING: {
		if (state != BKW_MMAP_DATA_PENDING)
			goto state_err;
		action = PAUSE;
		break;
	}
	case BKW_MMAP_DATA_PENDING: {
		if (state != BKW_MMAP_EMPTY)
			goto state_err;
		break;
	}
	case BKW_MMAP_EMPTY: {
		if (state != BKW_MMAP_RUNNING)
			goto state_err;
		action = RESUME;
		break;
	}
	default:
		WARN_ONCE(1, "Shouldn't get there\n");
	}

	evlist->bkw_mmap_state = state;

	switch (action) {
	case PAUSE:
		evlist__pause(evlist);
		break;
	case RESUME:
		evlist__resume(evlist);
		break;
	case NONE:
	default:
		break;
	}

state_err:
	return;
}

bool evlist__exclude_kernel(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (!evsel->core.attr.exclude_kernel)
			return false;
	}

	return true;
}

/*
 * Events in a data file are not collected in groups, but we still want
 * the group display. Set the artificial group and set the leader's
 * forced_leader flag to notify the display code.
 */
void evlist__force_leader(struct evlist *evlist)
{
	if (!evlist->core.nr_groups) {
		struct evsel *leader = evlist__first(evlist);

		evlist__set_leader(evlist);
		leader->forced_leader = true;
	}
}

struct evsel *evlist__reset_weak_group(struct evlist *evsel_list, struct evsel *evsel, bool close)
{
	struct evsel *c2, *leader;
	bool is_open = true;

	leader = evsel__leader(evsel);

	pr_debug("Weak group for %s/%d failed\n",
			leader->name, leader->core.nr_members);

	/*
	 * for_each_group_member doesn't work here because it doesn't
	 * include the first entry.
	 */
	evlist__for_each_entry(evsel_list, c2) {
		if (c2 == evsel)
			is_open = false;
		if (evsel__has_leader(c2, leader)) {
			if (is_open && close)
				perf_evsel__close(&c2->core);
			/*
			 * We want to close all members of the group and reopen
			 * them. Some events, like Intel topdown, require being
			 * in a group and so keep these in the group.
			 */
			evsel__remove_from_group(c2, leader);

			/*
			 * Set this for all former members of the group
			 * to indicate they get reopened.
			 */
			c2->reset_group = true;
		}
	}
	/* Reset the leader count if all entries were removed. */
	if (leader->core.nr_members == 1)
		leader->core.nr_members = 0;
	return leader;
}

static int evlist__parse_control_fifo(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close)
{
	char *s, *p;
	int ret = 0, fd;

	if (strncmp(str, "fifo:", 5))
		return -EINVAL;

	str += 5;
	if (!*str || *str == ',')
		return -EINVAL;

	s = strdup(str);
	if (!s)
		return -ENOMEM;

	p = strchr(s, ',');
	if (p)
		*p = '\0';

	/*
	 * O_RDWR avoids POLLHUPs which is necessary to allow the other
	 * end of a FIFO to be repeatedly opened and closed.
	 */
	fd = open(s, O_RDWR | O_NONBLOCK | O_CLOEXEC);
	if (fd < 0) {
		pr_err("Failed to open '%s'\n", s);
		ret = -errno;
		goto out_free;
	}
	*ctl_fd = fd;
	*ctl_fd_close = true;

	if (p && *++p) {
		/* O_RDWR | O_NONBLOCK means the other end need not be open */
		fd = open(p, O_RDWR | O_NONBLOCK | O_CLOEXEC);
		if (fd < 0) {
			pr_err("Failed to open '%s'\n", p);
			ret = -errno;
			goto out_free;
		}
		*ctl_fd_ack = fd;
	}

out_free:
	free(s);
	return ret;
}
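
/*
 * Parse the argument of --control; e.g. "fd:10,11" reuses already-open
 * descriptors (10 for commands, 11 for acks) while "fifo:ctl.fifo,ack.fifo"
 * has evlist__parse_control_fifo() open the named FIFOs.  The ack part is
 * optional in both forms.
 */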

int evlist__parse_control(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close)
{
	char *comma = NULL, *endptr = NULL;

	*ctl_fd_close = false;

	if (strncmp(str, "fd:", 3))
		return evlist__parse_control_fifo(str, ctl_fd, ctl_fd_ack, ctl_fd_close);

	*ctl_fd = strtoul(&str[3], &endptr, 0);
	if (endptr == &str[3])
		return -EINVAL;

	comma = strchr(str, ',');
	if (comma) {
		if (endptr != comma)
			return -EINVAL;

		*ctl_fd_ack = strtoul(comma + 1, &endptr, 0);
		if (endptr == comma + 1 || *endptr != '\0')
			return -EINVAL;
	}

	return 0;
}

void evlist__close_control(int ctl_fd, int ctl_fd_ack, bool *ctl_fd_close)
{
	if (*ctl_fd_close) {
		*ctl_fd_close = false;
		close(ctl_fd);
		if (ctl_fd_ack >= 0)
			close(ctl_fd_ack);
	}
}

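/*
 * The ctl_fd machinery below reads newline- or NUL-terminated textual
 * commands ("enable", "disable", "snapshot", "evlist", "stop", "ping" - see
 * the EVLIST_CTL_CMD_*_TAG strings in evlist.h, some taking an optional
 * argument) from evlist->ctl_fd.fd and, when an ack fd was configured,
 * writes an "ack" back once a command has been handled.
 */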
int evlist__initialize_ctlfd(struct evlist *evlist, int fd, int ack)
{
	if (fd == -1) {
		pr_debug("Control descriptor is not initialized\n");
		return 0;
	}

	evlist->ctl_fd.pos = perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN,
						     fdarray_flag__nonfilterable);
	if (evlist->ctl_fd.pos < 0) {
		evlist->ctl_fd.pos = -1;
		pr_err("Failed to add ctl fd entry: %m\n");
		return -1;
	}

	evlist->ctl_fd.fd = fd;
	evlist->ctl_fd.ack = ack;

	return 0;
}

bool evlist__ctlfd_initialized(struct evlist *evlist)
{
	return evlist->ctl_fd.pos >= 0;
}

int evlist__finalize_ctlfd(struct evlist *evlist)
{
	struct pollfd *entries = evlist->core.pollfd.entries;

	if (!evlist__ctlfd_initialized(evlist))
		return 0;

	entries[evlist->ctl_fd.pos].fd = -1;
	entries[evlist->ctl_fd.pos].events = 0;
	entries[evlist->ctl_fd.pos].revents = 0;

	evlist->ctl_fd.pos = -1;
	evlist->ctl_fd.ack = -1;
	evlist->ctl_fd.fd = -1;

	return 0;
}

static int evlist__ctlfd_recv(struct evlist *evlist, enum evlist_ctl_cmd *cmd,
			      char *cmd_data, size_t data_size)
{
	int err;
	char c;
	size_t bytes_read = 0;

	*cmd = EVLIST_CTL_CMD_UNSUPPORTED;
	memset(cmd_data, 0, data_size);
	data_size--;

	do {
		err = read(evlist->ctl_fd.fd, &c, 1);
		if (err > 0) {
			if (c == '\n' || c == '\0')
				break;
			cmd_data[bytes_read++] = c;
			if (bytes_read == data_size)
				break;
			continue;
		} else if (err == -1) {
			if (errno == EINTR)
				continue;
			if (errno == EAGAIN || errno == EWOULDBLOCK)
				err = 0;
			else
				pr_err("Failed to read from ctlfd %d: %m\n", evlist->ctl_fd.fd);
		}
		break;
	} while (1);

	pr_debug("Message from ctl_fd: \"%s%s\"\n", cmd_data,
		 bytes_read == data_size ? "" : c == '\n' ? "\\n" : "\\0");

	if (bytes_read > 0) {
		if (!strncmp(cmd_data, EVLIST_CTL_CMD_ENABLE_TAG,
			     (sizeof(EVLIST_CTL_CMD_ENABLE_TAG)-1))) {
			*cmd = EVLIST_CTL_CMD_ENABLE;
		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_DISABLE_TAG,
				    (sizeof(EVLIST_CTL_CMD_DISABLE_TAG)-1))) {
			*cmd = EVLIST_CTL_CMD_DISABLE;
		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_SNAPSHOT_TAG,
				    (sizeof(EVLIST_CTL_CMD_SNAPSHOT_TAG)-1))) {
			*cmd = EVLIST_CTL_CMD_SNAPSHOT;
			pr_debug("is snapshot\n");
		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_EVLIST_TAG,
				    (sizeof(EVLIST_CTL_CMD_EVLIST_TAG)-1))) {
			*cmd = EVLIST_CTL_CMD_EVLIST;
		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_STOP_TAG,
				    (sizeof(EVLIST_CTL_CMD_STOP_TAG)-1))) {
			*cmd = EVLIST_CTL_CMD_STOP;
		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_PING_TAG,
				    (sizeof(EVLIST_CTL_CMD_PING_TAG)-1))) {
			*cmd = EVLIST_CTL_CMD_PING;
		}
	}

	return bytes_read ? (int)bytes_read : err;
}

int evlist__ctlfd_ack(struct evlist *evlist)
{
	int err;

	if (evlist->ctl_fd.ack == -1)
		return 0;

	err = write(evlist->ctl_fd.ack, EVLIST_CTL_CMD_ACK_TAG,
		    sizeof(EVLIST_CTL_CMD_ACK_TAG));
	if (err == -1)
		pr_err("failed to write to ctl_ack_fd %d: %m\n", evlist->ctl_fd.ack);

	return err;
}

static int get_cmd_arg(char *cmd_data, size_t cmd_size, char **arg)
{
	char *data = cmd_data + cmd_size;

	/* no argument */
	if (!*data)
		return 0;

	/* there's argument */
	if (*data == ' ') {
		*arg = data + 1;
		return 1;
	}

	/* malformed */
	return -1;
}

static int evlist__ctlfd_enable(struct evlist *evlist, char *cmd_data, bool enable)
{
	struct evsel *evsel;
	char *name;
	int err;

	err = get_cmd_arg(cmd_data,
			  enable ? sizeof(EVLIST_CTL_CMD_ENABLE_TAG) - 1 :
				   sizeof(EVLIST_CTL_CMD_DISABLE_TAG) - 1,
			  &name);
	if (err < 0) {
		pr_info("failed: wrong command\n");
		return -1;
	}

	if (err) {
		evsel = evlist__find_evsel_by_str(evlist, name);
		if (evsel) {
			if (enable)
				evlist__enable_evsel(evlist, name);
			else
				evlist__disable_evsel(evlist, name);
			pr_info("Event %s %s\n", evsel->name,
				enable ? "enabled" : "disabled");
		} else {
			pr_info("failed: can't find '%s' event\n", name);
		}
	} else {
		if (enable) {
			evlist__enable(evlist);
			pr_info(EVLIST_ENABLED_MSG);
		} else {
			evlist__disable(evlist);
			pr_info(EVLIST_DISABLED_MSG);
		}
	}

	return 0;
}

static int evlist__ctlfd_list(struct evlist *evlist, char *cmd_data)
{
	struct perf_attr_details details = { .verbose = false, };
	struct evsel *evsel;
	char *arg;
	int err;

	err = get_cmd_arg(cmd_data,
			  sizeof(EVLIST_CTL_CMD_EVLIST_TAG) - 1,
			  &arg);
	if (err < 0) {
		pr_info("failed: wrong command\n");
		return -1;
	}

	if (err) {
		if (!strcmp(arg, "-v")) {
			details.verbose = true;
		} else if (!strcmp(arg, "-g")) {
			details.event_group = true;
		} else if (!strcmp(arg, "-F")) {
			details.freq = true;
		} else {
			pr_info("failed: wrong command\n");
			return -1;
		}
	}

	evlist__for_each_entry(evlist, evsel)
		evsel__fprintf(evsel, &details, stderr);

	return 0;
}

int evlist__ctlfd_process(struct evlist *evlist, enum evlist_ctl_cmd *cmd)
{
	int err = 0;
	char cmd_data[EVLIST_CTL_CMD_MAX_LEN];
	int ctlfd_pos = evlist->ctl_fd.pos;
	struct pollfd *entries = evlist->core.pollfd.entries;

	if (!evlist__ctlfd_initialized(evlist) || !entries[ctlfd_pos].revents)
		return 0;

	if (entries[ctlfd_pos].revents & POLLIN) {
		err = evlist__ctlfd_recv(evlist, cmd, cmd_data,
					 EVLIST_CTL_CMD_MAX_LEN);
		if (err > 0) {
			switch (*cmd) {
			case EVLIST_CTL_CMD_ENABLE:
			case EVLIST_CTL_CMD_DISABLE:
				err = evlist__ctlfd_enable(evlist, cmd_data,
							   *cmd == EVLIST_CTL_CMD_ENABLE);
				break;
			case EVLIST_CTL_CMD_EVLIST:
				err = evlist__ctlfd_list(evlist, cmd_data);
				break;
			case EVLIST_CTL_CMD_SNAPSHOT:
			case EVLIST_CTL_CMD_STOP:
			case EVLIST_CTL_CMD_PING:
				break;
			case EVLIST_CTL_CMD_ACK:
			case EVLIST_CTL_CMD_UNSUPPORTED:
			default:
				pr_debug("ctlfd: unsupported %d\n", *cmd);
				break;
			}
			if (!(*cmd == EVLIST_CTL_CMD_ACK || *cmd == EVLIST_CTL_CMD_UNSUPPORTED ||
			      *cmd == EVLIST_CTL_CMD_SNAPSHOT))
				evlist__ctlfd_ack(evlist);
		}
	}

	if (entries[ctlfd_pos].revents & (POLLHUP | POLLERR))
		evlist__finalize_ctlfd(evlist);
	else
		entries[ctlfd_pos].revents = 0;

	return err;
}

int evlist__ctlfd_update(struct evlist *evlist, struct pollfd *update)
{
	int ctlfd_pos = evlist->ctl_fd.pos;
	struct pollfd *entries = evlist->core.pollfd.entries;

	if (!evlist__ctlfd_initialized(evlist))
		return 0;

	if (entries[ctlfd_pos].fd != update->fd ||
	    entries[ctlfd_pos].events != update->events)
		return -1;

	entries[ctlfd_pos].revents = update->revents;
	return 0;
}

struct evsel *evlist__find_evsel(struct evlist *evlist, int idx)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.idx == idx)
			return evsel;
	}
	return NULL;
}

int evlist__scnprintf_evsels(struct evlist *evlist, size_t size, char *bf)
{
	struct evsel *evsel;
	int printed = 0;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__is_dummy_event(evsel))
			continue;
		if (size > (strlen(evsel__name(evsel)) + (printed ? 2 : 1))) {
			printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "," : "", evsel__name(evsel));
		} else {
			printed += scnprintf(bf + printed, size - printed, "%s...", printed ? "," : "");
			break;
		}
	}

	return printed;
}

void evlist__check_mem_load_aux(struct evlist *evlist)
{
	struct evsel *leader, *evsel, *pos;

	/*
	 * On some platforms the 'mem-loads' event must be used together with
	 * 'mem-loads-aux' within a group, with 'mem-loads-aux' as the group
	 * leader. Disband this group before reporting because 'mem-loads-aux'
	 * is just an auxiliary event: it doesn't carry any valid memory load
	 * information itself.
	 */
	evlist__for_each_entry(evlist, evsel) {
		leader = evsel__leader(evsel);
		if (leader == evsel)
			continue;

		if (leader->name && strstr(leader->name, "mem-loads-aux")) {
			for_each_group_evsel(pos, leader) {
				evsel__set_leader(pos, pos);
				pos->core.nr_members = 0;
			}
		}
	}
}