/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <byteswap.h>
#include "asm/bug.h"
#include "evsel.h"
#include "evlist.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0))

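/*
 * Size of the fixed-layout part of a sample: each bit set in sample_type
 * (below PERF_SAMPLE_MASK) contributes one u64 to the record. E.g.
 * PERF_SAMPLE_IP | PERF_SAMPLE_TID sets two bits, giving 16 bytes.
 */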
int __perf_evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}

static void hists__init(struct hists *hists)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
}

void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx	   = idx;
	evsel->attr	   = *attr;
	INIT_LIST_HEAD(&evsel->node);
	hists__init(&evsel->hists);
}

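/*
 * Allocate and initialize an evsel. A sketch of typical caller code
 * (illustrative only, not part of this file):
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *	};
 *	struct perf_evsel *evsel = perf_evsel__new(&attr, 0);
 *
 *	if (evsel == NULL)
 *		return -ENOMEM;
 *
 * The attr is copied into the evsel, so the caller's copy can live on
 * the stack.
 */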
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	return evsel;
}

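/*
 * Derive the perf_event_attr bits from the record options: which fields
 * each sample carries, period vs. frequency sampling, inheritance, and
 * the mmap/comm tracking bits that only the first counter needs.
 */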
void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts,
			struct perf_evsel *first)
{
	struct perf_event_attr *attr = &evsel->attr;
	int track = !evsel->idx; /* only the first counter needs these */

	attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;
	attr->read_format   = PERF_FORMAT_TOTAL_TIME_ENABLED |
			      PERF_FORMAT_TOTAL_TIME_RUNNING |
			      PERF_FORMAT_ID;

	attr->sample_type  |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;

	/*
	 * We default some events to a sample period of 1, but keep
	 * that a weak default which the user can override.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			attr->sample_type	|= PERF_SAMPLE_PERIOD;
			attr->freq		= 1;
			attr->sample_freq	= opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat)
		attr->inherit_stat = 1;

	if (opts->sample_address) {
		attr->sample_type	|= PERF_SAMPLE_ADDR;
		attr->mmap_data = track;
	}

	if (opts->call_graph)
		attr->sample_type	|= PERF_SAMPLE_CALLCHAIN;

	if (opts->system_wide)
		attr->sample_type	|= PERF_SAMPLE_CPU;

	if (opts->period)
		attr->sample_type	|= PERF_SAMPLE_PERIOD;

	if (!opts->sample_id_all_missing &&
	    (opts->sample_time || opts->system_wide ||
	     !opts->no_inherit || opts->cpu_list))
		attr->sample_type	|= PERF_SAMPLE_TIME;

	if (opts->raw_samples) {
		attr->sample_type	|= PERF_SAMPLE_TIME;
		attr->sample_type	|= PERF_SAMPLE_RAW;
		attr->sample_type	|= PERF_SAMPLE_CPU;
	}

	if (opts->no_delay) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}

	if (opts->branch_stack) {
		attr->sample_type	|= PERF_SAMPLE_BRANCH_STACK;
		attr->branch_sample_type = opts->branch_stack;
	}

	attr->mmap = track;
	attr->comm = track;

	if (!opts->target_pid && !opts->target_tid && !opts->system_wide &&
	    (!opts->group || evsel == first)) {
		attr->disabled = 1;
		attr->enable_on_exec = 1;
	}
}

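/*
 * The fd table is an ncpus x nthreads array; each slot holds the
 * sys_perf_event_open() fd for that (cpu, thread) pair, or -1 while
 * closed. Slots are accessed through the FD() macro above.
 */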
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;
	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}

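/*
 * sample_id mirrors the fd table: one struct perf_sample_id per
 * (cpu, thread) pair, plus a flat id[] array for the kernel-assigned
 * event IDs used to map samples back to this evsel.
 */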
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}

int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}

void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	free(evsel->id);
	evsel->id = NULL;
}

void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}

void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	xyarray__delete(evsel->fd);
	xyarray__delete(evsel->sample_id);
	free(evsel->id);
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	close_cgroup(evsel->cgrp);
	free(evsel->name);
	free(evsel);
}

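/*
 * Read one counter, optionally scaling for multiplexing: if the event was
 * scheduled in for only part of its enabled time, extrapolate with
 * val * ena / run. E.g. val = 1000, ena = 200, run = 100 scales to 2000;
 * the + 0.5 below rounds to nearest.
 */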
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	if (scale) {
		if (count.run == 0)
			count.val = 0;
		else if (count.run < count.ena)
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;
	return 0;
}

int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	evsel->counts->scaled = 0;
	if (scale) {
		if (aggr->run == 0) {
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			evsel->counts->scaled = 1;
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}

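/*
 * Open one fd per (cpu, thread) pair. For a cgroup event, pid carries the
 * cgroup fd together with PERF_FLAG_PID_CGROUP instead of a tid. When
 * grouping, the first fd opened on each cpu becomes the group leader and
 * is passed as group_fd for its siblings.
 */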
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads, bool group,
			      struct xyarray *group_fds)
{
	int cpu, thread;
	unsigned long flags = 0;
	int pid = -1, err;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -ENOMEM;

	if (evsel->cgrp) {
		flags = PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		int group_fd = group_fds ? GROUP_FD(group_fds, cpu) : -1;

		for (thread = 0; thread < threads->nr; thread++) {

			if (!evsel->cgrp)
				pid = threads->map[thread];

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0) {
				err = -errno;
				goto out_close;
			}

			if (group && group_fd == -1)
				group_fd = FD(evsel, cpu, thread);
		}
	}

	return 0;

out_close:
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = threads->nr;
	} while (--cpu >= 0);
	return err;
}

void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel, ncpus, nthreads);
	perf_evsel__free_fd(evsel);
	evsel->fd = NULL;
}

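/*
 * Stand-in maps for the "any cpu" / "any thread" cases: a single -1 entry
 * makes the open loop degenerate to one sys_perf_event_open() call with
 * cpu == -1 or pid == -1.
 */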
static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr	= 1,
	.cpus	= { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};

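/*
 * A sketch of how an open typically proceeds (illustrative; cpus and
 * threads here would come from cpu_map__new()/thread_map__new()):
 *
 *	if (perf_evsel__open(evsel, cpus, threads, false, NULL) < 0)
 *		die("cannot open counter");
 */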
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads, bool group,
		     struct xyarray *group_fd)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads, group, group_fd);
}

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus, bool group,
			     struct xyarray *group_fd)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group,
				  group_fd);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads, bool group,
				struct xyarray *group_fd)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group,
				  group_fd);
}

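/*
 * For non-sample events the optional sample_id_all fields are appended at
 * the tail of the record, so walk them backwards from the end, in the
 * reverse of the order the kernel writes them.
 */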
static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
				       struct perf_sample *sample)
{
	const u64 *array = event->sample.array;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_CPU) {
		u32 *p = (u32 *)array;
		sample->cpu = *p;
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u32 *p = (u32 *)array;
		sample->pid = p[0];
		sample->tid = p[1];
	}

	return 0;
}

static bool sample_overlap(const union perf_event *event,
			   const void *offset, u64 size)
{
	const void *base = event;

	if (offset + size > base + event->header.size)
		return true;

	return false;
}

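/*
 * Walk the sample payload front to back in the kernel's layout order.
 * Fields that pack two u32s into a u64 (TID, CPU) need their halves
 * swapped individually after the whole-u64 swap on cross-endian input.
 */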
int perf_event__parse_sample(const union perf_event *event, u64 type,
			     int sample_size, bool sample_id_all,
			     struct perf_sample *data, bool swapped)
{
	const u64 *array;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union {
		u64 val64;
		u32 val32[2];
	} u;

	memset(data, 0, sizeof(*data));
	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;
	data->period = 1;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!sample_id_all)
			return 0;
		return perf_event__parse_id_sample(event, type, data);
	}

	array = event->sample.array;

	if (sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;

	if (type & PERF_SAMPLE_IP) {
		data->ip = event->ip.ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		data->pid = u.val32[0];
		data->tid = u.val32[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	data->addr = 0;
	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	data->id = -1ULL;
	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {

		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		data->cpu = u.val32[0];
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n");
		return -1;
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		if (sample_overlap(event, array, sizeof(data->callchain->nr)))
			return -EFAULT;

		data->callchain = (struct ip_callchain *)array;

		if (sample_overlap(event, array, data->callchain->nr))
			return -EFAULT;

		array += 1 + data->callchain->nr;
	}

	if (type & PERF_SAMPLE_RAW) {
		const u64 *pdata;

		u.val64 = *array;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		if (sample_overlap(event, array, sizeof(u32)))
			return -EFAULT;

		data->raw_size = u.val32[0];
		pdata = (void *) array + sizeof(u32);

		if (sample_overlap(event, pdata, data->raw_size))
			return -EFAULT;

		data->raw_data = (void *) pdata;

		/* skip the size word and the raw payload itself */
		array = (void *)array + data->raw_size + sizeof(u32);
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		u64 sz;

		data->branch_stack = (struct branch_stack *)array;
		array++; /* nr */

		sz = data->branch_stack->nr * sizeof(struct branch_entry);
		sz /= sizeof(u64);
		array += sz;
	}
	return 0;
}

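/*
 * Inverse of perf_event__parse_sample() for the fields it supports:
 * write the sample back into the record in kernel layout order,
 * re-swapping the packed u32 pairs for cross-endian output.
 */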
int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  const struct perf_sample *sample,
				  bool swapped)
{
	u64 *array;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union {
		u64 val64;
		u32 val32[2];
	} u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IP) {
		event->ip.ip = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_event__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}

		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_event__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}

	return 0;
}