// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */
#include <api/fs/fs.h>
#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include "cpumap.h"
#include "util/mmap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include "units.h"
#include <internal/lib.h> // page_size
#include "../perf.h"
#include "asm/bug.h"
#include "bpf-event.h"
#include <signal.h>
#include <unistd.h>
#include <sched.h>
#include <stdlib.h>

#include "parse-events.h"
#include <subcmd/parse-options.h>

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/log2.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>

#include <internal/xyarray.h>

#ifdef LACKS_SIGQUEUE_PROTOTYPE
int sigqueue(pid_t pid, int sig, const union sigval value);
#endif

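/*
 * FD() and SID() index an evsel's per-(CPU, thread) file descriptor and
 * sample_id entries, which libperf keeps in xyarrays.
 */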
#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)

void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
		  struct perf_thread_map *threads)
{
	perf_evlist__init(&evlist->core);
	perf_evlist__set_maps(&evlist->core, cpus, threads);
	fdarray__init(&evlist->core.pollfd, 64);
	evlist->workload.pid = -1;
	evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
}

struct evlist *evlist__new(void)
{
	struct evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		evlist__init(evlist, NULL, NULL);

	return evlist;
}

struct evlist *perf_evlist__new_default(void)
{
	struct evlist *evlist = evlist__new();

	if (evlist && perf_evlist__add_default(evlist)) {
		evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

struct evlist *perf_evlist__new_dummy(void)
{
	struct evlist *evlist = evlist__new();

	if (evlist && perf_evlist__add_dummy(evlist)) {
		evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

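/*
 * Typical lifecycle, pieced together from the helpers in this file (a
 * simplified sketch: error handling elided, and real callers such as
 * builtin-record.c do more work between the steps):
 *
 *	struct evlist *evlist = evlist__new();
 *
 *	perf_evlist__create_maps(evlist, &target);	// resolve cpus/threads
 *	evlist__open(evlist);				// sys_perf_event_open()
 *	evlist__mmap(evlist, UINT_MAX);			// ring buffers
 *	evlist__enable(evlist);
 *	...						// consume events
 *	evlist__disable(evlist);
 *	evlist__delete(evlist);				// munmap, close, free
 */
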
/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos.  For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void perf_evlist__update_id_pos(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		perf_evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}

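/* Unhook every evsel from the list and delete it, leaving the list empty. */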
static void evlist__purge(struct evlist *evlist)
{
	struct evsel *pos, *n;

	evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->core.node);
		pos->evlist = NULL;
		evsel__delete(pos);
	}

	evlist->core.nr_entries = 0;
}

void evlist__exit(struct evlist *evlist)
{
	zfree(&evlist->mmap);
	zfree(&evlist->overwrite_mmap);
	fdarray__exit(&evlist->core.pollfd);
}

void evlist__delete(struct evlist *evlist)
{
	if (evlist == NULL)
		return;

	evlist__munmap(evlist);
	evlist__close(evlist);
	perf_cpu_map__put(evlist->core.cpus);
	perf_thread_map__put(evlist->core.threads);
	evlist->core.cpus = NULL;
	evlist->core.threads = NULL;
	evlist__purge(evlist);
	evlist__exit(evlist);
	free(evlist);
}

void evlist__add(struct evlist *evlist, struct evsel *entry)
{
	entry->evlist = evlist;
	entry->idx = evlist->core.nr_entries;
	entry->tracking = !entry->idx;

	perf_evlist__add(&evlist->core, &entry->core);

	if (evlist->core.nr_entries == 1)
		perf_evlist__set_id_pos(evlist);
}

void evlist__remove(struct evlist *evlist, struct evsel *evsel)
{
	evsel->evlist = NULL;
	perf_evlist__remove(&evlist->core, &evsel->core);
}

void perf_evlist__splice_list_tail(struct evlist *evlist,
				   struct list_head *list)
{
	struct evsel *evsel, *temp;

	__evlist__for_each_entry_safe(list, temp, evsel) {
		list_del_init(&evsel->core.node);
		evlist__add(evlist, evsel);
	}
}

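/*
 * The list is assumed non-empty: the first entry becomes the group leader
 * and nr_members is derived from the idx distance between the first and
 * last entries.
 */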
void __perf_evlist__set_leader(struct list_head *list)
{
	struct evsel *evsel, *leader;

	leader = list_entry(list->next, struct evsel, core.node);
	evsel = list_entry(list->prev, struct evsel, core.node);

	leader->core.nr_members = evsel->idx - leader->idx + 1;

	__evlist__for_each_entry(list, evsel) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct evlist *evlist)
{
	if (evlist->core.nr_entries) {
		evlist->nr_groups = evlist->core.nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->core.entries);
	}
}

int __perf_evlist__add_default(struct evlist *evlist, bool precise)
{
	struct evsel *evsel = perf_evsel__new_cycles(precise);

	if (evsel == NULL)
		return -ENOMEM;

	evlist__add(evlist, evsel);
	return 0;
}

int perf_evlist__add_dummy(struct evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_DUMMY,
		.size	= sizeof(attr), /* to capture ABI version */
	};
	struct evsel *evsel = perf_evsel__new_idx(&attr, evlist->core.nr_entries);

	if (evsel == NULL)
		return -ENOMEM;

	evlist__add(evlist, evsel);
	return 0;
}

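/*
 * Batch-create one evsel per attribute; on failure any evsels created so
 * far are deleted and the evlist is left untouched.
 */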
static int evlist__add_attrs(struct evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new_idx(attrs + i, evlist->core.nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->core.node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head);

	return 0;

out_delete_partial_list:
	__evlist__for_each_entry_safe(&head, n, evsel)
		evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct evsel *
perf_evlist__find_tracepoint_by_id(struct evlist *evlist, int id)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type   == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->core.attr.config == id)
			return evsel;
	}

	return NULL;
}

struct evsel *
perf_evlist__find_tracepoint_by_name(struct evlist *evlist,
				     const char *name)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->core.attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct evsel *evsel = perf_evsel__newtp(sys, name);

	if (IS_ERR(evsel))
		return -1;

	evsel->handler = handler;
	evlist__add(evlist, evsel);
	return 0;
}

static int perf_evlist__nr_threads(struct evlist *evlist,
				   struct evsel *evsel)
{
	if (evsel->core.system_wide)
		return 1;
	else
		return perf_thread_map__nr(evlist->core.threads);
}

void evlist__disable(struct evlist *evlist)
{
	struct evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (pos->disabled || !perf_evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		evsel__disable(pos);
	}

	evlist->enabled = false;
}

void evlist__enable(struct evlist *evlist)
{
	struct evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (!perf_evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		evsel__enable(pos);
	}

	evlist->enabled = true;
}

void perf_evlist__toggle_enable(struct evlist *evlist)
{
	(evlist->enabled ? evlist__disable : evlist__enable)(evlist);
}

static int perf_evlist__enable_event_cpu(struct evlist *evlist,
					 struct evsel *evsel, int cpu)
{
	int thread;
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->core.fd)
		return -EINVAL;

	for (thread = 0; thread < nr_threads; thread++) {
		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__enable_event_thread(struct evlist *evlist,
					    struct evsel *evsel,
					    int thread)
{
	int cpu;
	int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);

	if (!evsel->core.fd)
		return -EINVAL;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

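/*
 * @idx is a CPU index when the evlist mmaps per CPU and a thread index
 * when it mmaps per thread.
 */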
int perf_evlist__enable_event_idx(struct evlist *evlist,
				  struct evsel *evsel, int idx)
{
	bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.cpus);

	if (per_cpu_mmaps)
		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
	else
		return perf_evlist__enable_event_thread(evlist, evsel, idx);
}

int evlist__add_pollfd(struct evlist *evlist, int fd)
{
	return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN);
}

static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
					 void *arg __maybe_unused)
{
	struct mmap *map = fda->priv[fd].ptr;

	if (map)
		perf_mmap__put(map);
}

int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
{
	return fdarray__filter(&evlist->core.pollfd, revents_and_mask,
			       perf_evlist__munmap_filtered, NULL);
}

int perf_evlist__poll(struct evlist *evlist, int timeout)
{
	return fdarray__poll(&evlist->core.pollfd, timeout);
}

static void perf_evlist__set_sid_idx(struct evlist *evlist,
				     struct evsel *evsel, int idx, int cpu,
				     int thread)
{
	struct perf_sample_id *sid = SID(evsel, cpu, thread);
	sid->idx = idx;
	if (evlist->core.cpus && cpu >= 0)
		sid->cpu = evlist->core.cpus->map[cpu];
	else
		sid->cpu = -1;
	if (!evsel->core.system_wide && evlist->core.threads && thread >= 0)
		sid->tid = perf_thread_map__pid(evlist->core.threads, thread);
	else
		sid->tid = -1;
}

struct perf_sample_id *perf_evlist__id2sid(struct evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->core.heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct evsel *perf_evlist__id2evsel(struct evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->core.nr_entries == 1 || !id)
		return evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return container_of(sid->evsel, struct evsel, core);

	if (!perf_evlist__sample_id_all(evlist))
		return evlist__first(evlist);

	return NULL;
}

struct evsel *perf_evlist__id2evsel_strict(struct evlist *evlist,
						u64 id)
{
	struct perf_sample_id *sid;

	if (!id)
		return NULL;

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return container_of(sid->evsel, struct evsel, core);

	return NULL;
}

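/*
 * For PERF_RECORD_SAMPLE events id_pos indexes forward from the start of
 * the sample array; for other record types is_pos indexes backward from
 * its end, where the sample_id trailer lives.
 */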
static int perf_evlist__event2id(struct evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const __u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

struct evsel *perf_evlist__event2evsel(struct evlist *evlist,
					    union perf_event *event)
{
	struct evsel *first = evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->core.nr_entries == 1)
		return first;

	if (!first->core.attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->core.heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return container_of(sid->evsel, struct evsel, core);
	}
	return NULL;
}

static int perf_evlist__set_paused(struct evlist *evlist, bool value)
{
	int i;

	if (!evlist->overwrite_mmap)
		return 0;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		int fd = evlist->overwrite_mmap[i].core.fd;
		int err;

		if (fd < 0)
			continue;
		err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__pause(struct evlist *evlist)
{
	return perf_evlist__set_paused(evlist, true);
}

static int perf_evlist__resume(struct evlist *evlist)
{
	return perf_evlist__set_paused(evlist, false);
}

static void evlist__munmap_nofree(struct evlist *evlist)
{
	int i;

	if (evlist->mmap)
		for (i = 0; i < evlist->core.nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i]);

	if (evlist->overwrite_mmap)
		for (i = 0; i < evlist->core.nr_mmaps; i++)
			perf_mmap__munmap(&evlist->overwrite_mmap[i]);
}

void evlist__munmap(struct evlist *evlist)
{
	evlist__munmap_nofree(evlist);
	zfree(&evlist->mmap);
	zfree(&evlist->overwrite_mmap);
}

static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
				       bool overwrite)
{
	int i;
	struct mmap *map;

	evlist->core.nr_mmaps = perf_cpu_map__nr(evlist->core.cpus);
	if (perf_cpu_map__empty(evlist->core.cpus))
		evlist->core.nr_mmaps = perf_thread_map__nr(evlist->core.threads);
	map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		map[i].core.fd = -1;
		map[i].core.overwrite = overwrite;
		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_mmap__consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
		 * thus does perf_mmap__get() on it.
		 */
		refcount_set(&map[i].core.refcnt, 0);
	}
	return map;
}

static bool
perf_evlist__should_poll(struct evlist *evlist __maybe_unused,
			 struct evsel *evsel)
{
	if (evsel->core.attr.write_backward)
		return false;
	return true;
}

static int evlist__mmap_per_evsel(struct evlist *evlist, int idx,
				       struct mmap_params *mp, int cpu_idx,
				       int thread, int *_output, int *_output_overwrite)
{
	struct evsel *evsel;
	int revent;
	int evlist_cpu = cpu_map__cpu(evlist->core.cpus, cpu_idx);

	evlist__for_each_entry(evlist, evsel) {
		struct mmap *maps = evlist->mmap;
		int *output = _output;
		int fd;
		int cpu;

		mp->prot = PROT_READ | PROT_WRITE;
		if (evsel->core.attr.write_backward) {
			output = _output_overwrite;
			maps = evlist->overwrite_mmap;

			if (!maps) {
				maps = evlist__alloc_mmap(evlist, true);
				if (!maps)
					return -1;
				evlist->overwrite_mmap = maps;
				if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
					perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
			}
			mp->prot &= ~PROT_WRITE;
		}

		if (evsel->core.system_wide && thread)
			continue;

		cpu = perf_cpu_map__idx(evsel->core.cpus, evlist_cpu);
		if (cpu == -1)
			continue;

		fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;

			if (perf_mmap__mmap(&maps[idx], mp, *output, evlist_cpu) < 0)
				return -1;
		} else {
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;

			perf_mmap__get(&maps[idx]);
		}

		revent = perf_evlist__should_poll(evlist, evsel) ? POLLIN : 0;

		/*
		 * The system_wide flag causes a selected event to be opened
		 * always without a pid.  Consequently it will never get a
		 * POLLHUP, but it is used for tracking in combination with
		 * other events, so it should not need to be polled anyway.
		 * Therefore don't add it for polling.
		 */
		if (!evsel->core.system_wide &&
		     perf_evlist__add_pollfd(&evlist->core, fd, &maps[idx], revent) < 0) {
			perf_mmap__put(&maps[idx]);
			return -1;
		}

		if (evsel->core.attr.read_format & PERF_FORMAT_ID) {
			if (perf_evlist__id_add_fd(&evlist->core, &evsel->core, cpu, thread,
						   fd) < 0)
				return -1;
			perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
						 thread);
		}
	}

	return 0;
}

static int evlist__mmap_per_cpu(struct evlist *evlist,
				     struct mmap_params *mp)
{
	int cpu, thread;
	int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);
	int nr_threads = perf_thread_map__nr(evlist->core.threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;
		int output_overwrite = -1;

		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
					      true);

		for (thread = 0; thread < nr_threads; thread++) {
			if (evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
							thread, &output, &output_overwrite))
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	evlist__munmap_nofree(evlist);
	return -1;
}

static int evlist__mmap_per_thread(struct evlist *evlist,
					struct mmap_params *mp)
{
	int thread;
	int nr_threads = perf_thread_map__nr(evlist->core.threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;
		int output_overwrite = -1;

		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
					      false);

		if (evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
						&output, &output_overwrite))
			goto out_unmap;
	}

	return 0;

out_unmap:
	evlist__munmap_nofree(evlist);
	return -1;
}

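/*
 * Derive the default number of mmap data pages from the
 * kernel.perf_event_mlock_kb limit. Worked example (assuming 4 kB pages):
 * perf_event_mlock_kb = 516 leaves 516 - 4 = 512 kB after reserving one
 * page, i.e. 128 pages, already a power of two, so 128 is returned.
 */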
unsigned long perf_event_mlock_kb_in_pages(void)
{
	unsigned long pages;
	int max;

	if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
		/*
		 * Pick a value that was considered good once upon a time;
		 * things may look strange since we can't read the sysctl
		 * value, but let's not die yet...
		 */
		max = 512;
	} else {
		max -= (page_size / 1024);
	}
	pages = (max * 1024) / page_size;
	if (!is_power_of_2(pages))
		pages = rounddown_pow_of_two(pages);

	return pages;
}

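/*
 * Note the +1 below: a perf ring buffer needs one extra page in front of
 * the 2^n data pages for the struct perf_event_mmap_page control header.
 */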
size_t evlist__mmap_size(unsigned long pages)
{
	if (pages == UINT_MAX)
		pages = perf_event_mlock_kb_in_pages();
	else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}

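/*
 * Accepts either a size with a B/K/M/G suffix, which is rounded up to
 * whole pages, or a plain page count. For example, with 4 kB pages,
 * "512K" yields 128 pages, while "129" is not a power of two and is
 * rounded up to 256 pages.
 */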
static long parse_pages_arg(const char *str, unsigned long min,
			    unsigned long max)
{
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag  = 'B', .mult = 1       },
		{ .tag  = 'K', .mult = 1 << 10 },
		{ .tag  = 'M', .mult = 1 << 20 },
		{ .tag  = 'G', .mult = 1 << 30 },
		{ .tag  = 0 },
	};

	if (str == NULL)
		return -EINVAL;

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
	} else {
		/* we got pages count value */
		char *eptr;
		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0')
			return -EINVAL;
	}

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		char buf[100];

		/* round pages up to next power of 2 */
		pages = roundup_pow_of_two(pages);
		if (!pages)
			return -EINVAL;

		unit_number__scnprintf(buf, sizeof(buf), pages * page_size);
		pr_info("rounding mmap pages size to %s (%lu pages)\n",
			buf, pages);
	}

	if (pages > max)
		return -EINVAL;

	return pages;
}

int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
{
	unsigned long max = UINT_MAX;
	long pages;

	if (max > SIZE_MAX / page_size)
		max = SIZE_MAX / page_size;

	pages = parse_pages_arg(str, 1, max);
	if (pages < 0) {
		pr_err("Invalid argument for --mmap_pages/-m\n");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}

int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
				  int unset __maybe_unused)
{
	return __perf_evlist__parse_mmap_pages(opt->value, str);
}

/**
 * evlist__mmap_ex - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 * @auxtrace_pages: auxtrace map length in pages
 * @auxtrace_overwrite: overwrite older auxtrace data?
 *
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail().  Using evlist__mmap_read() does this
 * automatically.
 *
 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
 * consumption using auxtrace_mmap__write_tail().
 *
 * Return: %0 on success, negative error code otherwise.
 */
int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
			 unsigned int auxtrace_pages,
			 bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush,
			 int comp_level)
{
	struct evsel *evsel;
	const struct perf_cpu_map *cpus = evlist->core.cpus;
	const struct perf_thread_map *threads = evlist->core.threads;
	/*
	 * Delay setting mp.prot: set it before calling perf_mmap__mmap.
	 * Its value is decided by evsel's write_backward.
	 * So &mp should not be passed through const pointer.
	 */
	struct mmap_params mp = { .nr_cblocks = nr_cblocks, .affinity = affinity, .flush = flush,
				  .comp_level = comp_level };

	if (!evlist->mmap)
		evlist->mmap = evlist__alloc_mmap(evlist, false);
	if (!evlist->mmap)
		return -ENOMEM;

	if (evlist->core.pollfd.entries == NULL && perf_evlist__alloc_pollfd(&evlist->core) < 0)
		return -ENOMEM;

	evlist->core.mmap_len = evlist__mmap_size(pages);
	pr_debug("mmap size %zuB\n", evlist->core.mmap_len);
	mp.mask = evlist->core.mmap_len - page_size - 1;

	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->core.mmap_len,
				   auxtrace_pages, auxtrace_overwrite);

	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->core.attr.read_format & PERF_FORMAT_ID) &&
		    evsel->core.sample_id == NULL &&
		    perf_evsel__alloc_id(&evsel->core, perf_cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (perf_cpu_map__empty(cpus))
		return evlist__mmap_per_thread(evlist, &mp);

	return evlist__mmap_per_cpu(evlist, &mp);
}
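/*
 * Plain mmap with the defaults: no AIO (nr_cblocks = 0), system-wide
 * affinity, flush = 1 and no compression.
 */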
int evlist__mmap(struct evlist *evlist, unsigned int pages)
{
	return evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0);
}

int perf_evlist__create_maps(struct evlist *evlist, struct target *target)
{
	bool all_threads = (target->per_thread && target->system_wide);
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;

	/*
	 * If both '-a' and '--per-thread' are specified to perf record,
	 * '-a' overrides '--per-thread': target->per_thread = false and
	 * target->system_wide = true.
	 *
	 * If only '--per-thread' is specified, target->per_thread = true
	 * and target->system_wide = false.
	 *
	 * So for perf record, target->per_thread && target->system_wide is
	 * always false and thread_map__new_str() doesn't call
	 * thread_map__new_all_cpus(), which keeps perf record's current
	 * behavior.
	 *
	 * perf stat, however, allows target->per_thread and
	 * target->system_wide to both be true, meaning collect system-wide
	 * per-thread data. In that case thread_map__new_str() calls
	 * thread_map__new_all_cpus() to enumerate all threads.
	 */
	threads = thread_map__new_str(target->pid, target->tid, target->uid,
				      all_threads);

	if (!threads)
		return -1;

	if (target__uses_dummy_map(target))
		cpus = perf_cpu_map__dummy_new();
	else
		cpus = perf_cpu_map__new(target->cpu_list);

	if (!cpus)
		goto out_delete_threads;

	evlist->core.has_user_cpus = !!target->cpu_list;

	perf_evlist__set_maps(&evlist->core, cpus, threads);

	return 0;

out_delete_threads:
	perf_thread_map__put(threads);
	return -1;
}

void __perf_evlist__set_sample_bit(struct evlist *evlist,
				   enum perf_event_sample_format bit)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		__perf_evsel__set_sample_bit(evsel, bit);
}

void __perf_evlist__reset_sample_bit(struct evlist *evlist,
				     enum perf_event_sample_format bit)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		__perf_evsel__reset_sample_bit(evsel, bit);
}

int perf_evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel)
{
	struct evsel *evsel;
	int err = 0;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->filter == NULL)
			continue;

		/*
		 * Filters only work for tracepoint events, which don't have
		 * a CPU limit, so the evlist and the evsel should always be
		 * the same.
		 */
		err = perf_evsel__apply_filter(&evsel->core, evsel->filter);
		if (err) {
			*err_evsel = evsel;
			break;
		}
	}

	return err;
}

int perf_evlist__set_tp_filter(struct evlist *evlist, const char *filter)
{
	struct evsel *evsel;
	int err = 0;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		err = perf_evsel__set_filter(evsel, filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter;
	int ret = -1;
	size_t i;

	for (i = 0; i < npids; ++i) {
		if (i == 0) {
			if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
				return -1;
		} else {
			char *tmp;

			if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
				goto out_free;

			free(filter);
			filter = tmp;
		}
	}

	ret = perf_evlist__set_tp_filter(evlist, filter);
out_free:
	free(filter);
	return ret;
}

int perf_evlist__set_tp_filter_pid(struct evlist *evlist, pid_t pid)
{
	return perf_evlist__set_tp_filter_pids(evlist, 1, &pid);
}

bool perf_evlist__valid_sample_type(struct evlist *evlist)
{
	struct evsel *pos;

	if (evlist->core.nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	evlist__for_each_entry(evlist, pos) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}

u64 __perf_evlist__combined_sample_type(struct evlist *evlist)
{
	struct evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	evlist__for_each_entry(evlist, evsel)
		evlist->combined_sample_type |= evsel->core.attr.sample_type;

	return evlist->combined_sample_type;
}

u64 perf_evlist__combined_sample_type(struct evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __perf_evlist__combined_sample_type(evlist);
}

u64 perf_evlist__combined_branch_type(struct evlist *evlist)
{
	struct evsel *evsel;
	u64 branch_type = 0;

	evlist__for_each_entry(evlist, evsel)
		branch_type |= evsel->core.attr.branch_sample_type;
	return branch_type;
}

bool perf_evlist__valid_read_format(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist), *pos = first;
	u64 read_format = first->core.attr.read_format;
	u64 sample_type = first->core.attr.sample_type;

	evlist__for_each_entry(evlist, pos) {
		if (read_format != pos->core.attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

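/*
 * Size of the sample_id trailer that perf appends to non-sample events
 * when sample_id_all is set. For instance, with PERF_SAMPLE_TID | TIME |
 * IDENTIFIER the trailer is 8 + 8 + 8 = 24 bytes.
 */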
u16 perf_evlist__id_hdr_size(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->core.attr.sample_id_all)
		goto out;

	sample_type = first->core.attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}

bool perf_evlist__valid_sample_id_all(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist), *pos = first;

	evlist__for_each_entry_continue(evlist, pos) {
		if (first->core.attr.sample_id_all != pos->core.attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);
	return first->core.attr.sample_id_all;
}
void perf_evlist__set_selected(struct evlist *evlist,
			       struct evsel *evsel)
{
	evlist->selected = evsel;
}

void evlist__close(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry_reverse(evlist, evsel)
		evsel__close(evsel);
}

static int perf_evlist__create_syswide_maps(struct evlist *evlist)
{
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;
	int err = -ENOMEM;

	/*
	 * Try reading /sys/devices/system/cpu/online to get
	 * an all cpus map.
	 *
	 * FIXME: -ENOMEM is the best we can do here, the cpu_map
	 * code needs an overhaul to properly forward the
	 * error, and we may not want to do that fallback to a
	 * default cpu identity map :-\
	 */
	cpus = perf_cpu_map__new(NULL);
	if (!cpus)
		goto out;

	threads = perf_thread_map__new_dummy();
	if (!threads)
		goto out_put;

	perf_evlist__set_maps(&evlist->core, cpus, threads);

	return 0;

out_put:
	perf_cpu_map__put(cpus);
out:
	return err;
}

int evlist__open(struct evlist *evlist)
{
	struct evsel *evsel;
	int err;

	/*
	 * Default: one fd per CPU, all threads, aka systemwide
	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
	 */
	if (evlist->core.threads == NULL && evlist->core.cpus == NULL) {
		err = perf_evlist__create_syswide_maps(evlist);
		if (err < 0)
			goto out_err;
	}

	perf_evlist__update_id_pos(evlist);

	evlist__for_each_entry(evlist, evsel) {
		err = evsel__open(evsel, evsel->core.cpus, evsel->core.threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	evlist__close(evlist);
	errno = -err;
	return err;
}
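/*
 * Fork the workload but keep it corked: the child signals readiness over
 * child_ready_pipe, then blocks reading go_pipe until
 * perf_evlist__start_workload() writes the single byte that lets it
 * execvp() the workload.
 */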
int perf_evlist__prepare_workload(struct evlist *evlist, struct target *target,
				  const char *argv[], bool pipe_output,
				  void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		int ret;

		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		ret = read(go_pipe[0], &bf, 1);
		/*
		 * The parent will ask for the execvp() to be performed by
		 * writing exactly one byte to workload.cork_fd, usually via
		 * perf_evlist__start_workload().
		 *
		 * For cancelling the workload without actually running it,
		 * the parent will just close workload.cork_fd, without writing
		 * anything, i.e. read will return zero and we just exit()
		 * here.
		 */
		if (ret != 1) {
			if (ret == -1)
				perror("unable to read pipe");
			exit(ret);
		}

		execvp(argv[0], (char **)argv);

		if (exec_error) {
			union sigval val;

			val.sival_int = errno;
			if (sigqueue(getppid(), SIGUSR1, val))
				perror(argv[0]);
		} else
			perror(argv[0]);
		exit(-1);
	}

	if (exec_error) {
		struct sigaction act = {
			.sa_flags     = SA_SIGINFO,
			.sa_sigaction = exec_error,
		};
		sigaction(SIGUSR1, &act, NULL);
	}

	if (target__none(target)) {
		if (evlist->core.threads == NULL) {
			fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n",
				__func__, __LINE__);
			goto out_close_pipes;
		}
		perf_thread_map__set_pid(evlist->core.threads, 0, evlist->workload.pid);
	}

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int perf_evlist__start_workload(struct evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}
int perf_evlist__parse_sample(struct evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return perf_evsel__parse_sample(evsel, event, sample);
}

int perf_evlist__parse_sample_timestamp(struct evlist *evlist,
					union perf_event *event,
					u64 *timestamp)
{
	struct evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return perf_evsel__parse_sample_timestamp(evsel, event, timestamp);
}

size_t perf_evlist__fprintf(struct evlist *evlist, FILE *fp)
{
	struct evsel *evsel;
	size_t printed = 0;

	evlist__for_each_entry(evlist, evsel) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}
int perf_evlist__strerror_open(struct evlist *evlist,
			       int err, char *buf, size_t size)
{
	int printed, value;
	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));

	switch (err) {
	case EACCES:
	case EPERM:
		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);

		value = perf_event_paranoid();

		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");

		if (value >= 2) {
			printed += scnprintf(buf + printed, size - printed,
					     "For your workloads it needs to be <= 1\nHint:\t");
		}
		printed += scnprintf(buf + printed, size - printed,
				     "For system wide tracing it needs to be set to -1.\n");

		printed += scnprintf(buf + printed, size - printed,
				    "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
				    "Hint:\tThe current value is %d.", value);
		break;
	case EINVAL: {
		struct evsel *first = evlist__first(evlist);
		int max_freq;

		if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
			goto out_default;

		if (first->core.attr.sample_freq < (u64)max_freq)
			goto out_default;

		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
				    "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
				    emsg, max_freq, first->core.attr.sample_freq);
		break;
	}
	default:
out_default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}
int perf_evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
	int pages_attempted = evlist->core.mmap_len / 1024, pages_max_per_user, printed = 0;

	switch (err) {
	case EPERM:
		sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
		printed += scnprintf(buf + printed, size - printed,
				     "Error:\t%s.\n"
				     "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
				     "Hint:\tTried using %zd kB.\n",
				     emsg, pages_max_per_user, pages_attempted);

		if (pages_attempted >= pages_max_per_user) {
			printed += scnprintf(buf + printed, size - printed,
					     "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
					     pages_max_per_user + pages_attempted);
		}

		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tTry using a smaller -m/--mmap-pages value.");
		break;
	default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}

void perf_evlist__to_front(struct evlist *evlist,
			   struct evsel *move_evsel)
{
	struct evsel *evsel, *n;
	LIST_HEAD(move);

	if (move_evsel == evlist__first(evlist))
		return;

	evlist__for_each_entry_safe(evlist, n, evsel) {
		if (evsel->leader == move_evsel->leader)
			list_move_tail(&evsel->core.node, &move);
	}

	list_splice(&move, &evlist->core.entries);
}
void perf_evlist__set_tracking_event(struct evlist *evlist,
				     struct evsel *tracking_evsel)
{
	struct evsel *evsel;

	if (tracking_evsel->tracking)
		return;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel != tracking_evsel)
			evsel->tracking = false;
	}

	tracking_evsel->tracking = true;
}
struct evsel *
perf_evlist__find_evsel_by_str(struct evlist *evlist,
			       const char *str)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (!evsel->name)
			continue;
		if (strcmp(str, evsel->name) == 0)
			return evsel;
	}

	return NULL;
}
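/*
 * Legal state transitions for the overwrite (backward) ring buffers:
 *
 *	NOTREADY --> RUNNING --> DATA_PENDING --> EMPTY
 *	                ^                           |
 *	                +---------------------------+
 *
 * Entering DATA_PENDING pauses the ring buffers; going back to RUNNING
 * resumes them.
 */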
void perf_evlist__toggle_bkw_mmap(struct evlist *evlist,
				  enum bkw_mmap_state state)
{
	enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
	enum action {
		NONE,
		PAUSE,
		RESUME,
	} action = NONE;

	if (!evlist->overwrite_mmap)
		return;

	switch (old_state) {
	case BKW_MMAP_NOTREADY: {
		if (state != BKW_MMAP_RUNNING)
			goto state_err;
		break;
	}
	case BKW_MMAP_RUNNING: {
		if (state != BKW_MMAP_DATA_PENDING)
			goto state_err;
		action = PAUSE;
		break;
	}
	case BKW_MMAP_DATA_PENDING: {
		if (state != BKW_MMAP_EMPTY)
			goto state_err;
		break;
	}
	case BKW_MMAP_EMPTY: {
		if (state != BKW_MMAP_RUNNING)
			goto state_err;
		action = RESUME;
		break;
	}
	default:
		WARN_ONCE(1, "Shouldn't get here\n");
	}
	}

	evlist->bkw_mmap_state = state;

	switch (action) {
	case PAUSE:
		perf_evlist__pause(evlist);
		break;
	case RESUME:
		perf_evlist__resume(evlist);
		break;
	case NONE:
	default:
		break;
	}

state_err:
	return;
}
bool perf_evlist__exclude_kernel(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (!evsel->core.attr.exclude_kernel)
			return false;
	}

	return true;
}

/*
 * Events in a data file are not collected in groups, but we still want
 * the group display. Set the artificial group and set the leader's
 * forced_leader flag to notify the display code.
 */
void perf_evlist__force_leader(struct evlist *evlist)
{
	if (!evlist->nr_groups) {
		struct evsel *leader = evlist__first(evlist);

		perf_evlist__set_leader(evlist);
		leader->forced_leader = true;
	}
}
struct evsel *perf_evlist__reset_weak_group(struct evlist *evsel_list,
						 struct evsel *evsel)
{
	struct evsel *c2, *leader;
	bool is_open = true;

	leader = evsel->leader;
	pr_debug("Weak group for %s/%d failed\n",
			leader->name, leader->core.nr_members);

	/*
	 * for_each_group_member doesn't work here because it doesn't
	 * include the first entry.
	 */
	evlist__for_each_entry(evsel_list, c2) {
		if (c2 == evsel)
			is_open = false;
		if (c2->leader == leader) {
			if (is_open)
				evsel__close(c2);
			c2->leader = c2;
			c2->core.nr_members = 0;
		}
	}
	return leader;
}
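/*
 * Side-band events: add an evsel whose events are delivered to @cb on a
 * dedicated polling thread (see perf_evlist__poll_thread() below) rather
 * than through the main mmap reader.
 */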
int perf_evlist__add_sb_event(struct evlist **evlist,
			      struct perf_event_attr *attr,
			      perf_evsel__sb_cb_t cb,
			      void *data)
{
	struct evsel *evsel;
	bool new_evlist = (*evlist) == NULL;

	if (*evlist == NULL)
		*evlist = evlist__new();
	if (*evlist == NULL)
		return -1;

	if (!attr->sample_id_all) {
		pr_warning("enabling sample_id_all for all side band events\n");
		attr->sample_id_all = 1;
	}

	evsel = perf_evsel__new_idx(attr, (*evlist)->core.nr_entries);
	if (!evsel)
		goto out_err;

	evsel->side_band.cb = cb;
	evsel->side_band.data = data;
	evlist__add(*evlist, evsel);
	return 0;

out_err:
	if (new_evlist) {
		evlist__delete(*evlist);
		*evlist = NULL;
	}
	return -1;
}

static void *perf_evlist__poll_thread(void *arg)
{
	struct evlist *evlist = arg;
	bool draining = false;
	int i, done = 0;
	/*
	 * In order to read symbols from other namespaces perf needs to call
	 * setns(2).  This isn't permitted if the struct_fs has multiple users.
	 * unshare(2) the fs so that we may continue to setns into namespaces
	 * that we're observing when, for instance, reading the build-ids at
	 * the end of a 'perf record' session.
	 */
	unshare(CLONE_FS);

	while (!done) {
		bool got_data = false;

		if (evlist->thread.done)
			draining = true;

		if (!draining)
			perf_evlist__poll(evlist, 1000);

		for (i = 0; i < evlist->core.nr_mmaps; i++) {
			struct mmap *map = &evlist->mmap[i];
			union perf_event *event;

			if (perf_mmap__read_init(map))
				continue;
			while ((event = perf_mmap__read_event(map)) != NULL) {
				struct evsel *evsel = perf_evlist__event2evsel(evlist, event);

				if (evsel && evsel->side_band.cb)
					evsel->side_band.cb(event, evsel->side_band.data);
				else
					pr_warning("cannot locate proper evsel for the side band event\n");

				perf_mmap__consume(map);
				got_data = true;
			}
			perf_mmap__read_done(map);
		}

		if (draining && !got_data)
			break;
	}
	return NULL;
}

int perf_evlist__start_sb_thread(struct evlist *evlist,
				 struct target *target)
{
	struct evsel *counter;

	if (!evlist)
		return 0;

	if (perf_evlist__create_maps(evlist, target))
		goto out_delete_evlist;

	evlist__for_each_entry(evlist, counter) {
		if (evsel__open(counter, evlist->core.cpus,
				     evlist->core.threads) < 0)
			goto out_delete_evlist;
	}

	if (evlist__mmap(evlist, UINT_MAX))
		goto out_delete_evlist;

	evlist__for_each_entry(evlist, counter) {
		if (evsel__enable(counter))
			goto out_delete_evlist;
	}

	evlist->thread.done = 0;
	if (pthread_create(&evlist->thread.th, NULL, perf_evlist__poll_thread, evlist))
		goto out_delete_evlist;

	return 0;

out_delete_evlist:
	evlist__delete(evlist);
	evlist = NULL;
	return -1;
}

void perf_evlist__stop_sb_thread(struct evlist *evlist)
{
	if (!evlist)
		return;
	evlist->thread.done = 1;
	pthread_join(evlist->thread.th, NULL);
	evlist__delete(evlist);
}