/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <byteswap.h>
#include "asm/bug.h"
#include "evsel.h"
#include "evlist.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"

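/*
 * FD() indexes the per-event xyarray of file descriptors by (cpu, thread);
 * GROUP_FD() fetches the group leader's fd for a given cpu, which is kept
 * at thread index 0 of the leader's array.
 */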
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0))

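/*
 * Every bit set in sample_type makes the kernel append one u64 to each
 * sample, so the fixed part of a sample's size is simply the number of
 * set bits (within PERF_SAMPLE_MASK) times sizeof(u64).
 */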
int __perf_evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}

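/*
 * Two entries_in trees are kept so that one can take in new entries while
 * the other is being consumed (e.g. by the perf top display thread); the
 * lock guards the switch between them.
 */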
void hists__init(struct hists *hists)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
}

void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx	   = idx;
	evsel->attr	   = *attr;
	INIT_LIST_HEAD(&evsel->node);
	hists__init(&evsel->hists);
}

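/* Allocate a zero-filled evsel and initialize it from attr and idx. */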
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	return evsel;
}

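/*
 * Translate the record options into a perf_event_attr: the event starts
 * disabled, the sampling frequency/period and sample_type bits follow the
 * options, and only the first counter (idx 0) requests the mmap/comm
 * side-band events so they are not duplicated once per counter.
 */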
void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts,
			struct perf_evsel *first)
{
	struct perf_event_attr *attr = &evsel->attr;
	int track = !evsel->idx; /* only the first counter needs these */

	attr->disabled = 1;
	attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;
	attr->read_format   = PERF_FORMAT_TOTAL_TIME_ENABLED |
			      PERF_FORMAT_TOTAL_TIME_RUNNING |
			      PERF_FORMAT_ID;

	attr->sample_type  |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;

	/*
	 * We default some events to a default interval, but keep it a
	 * weak assumption that the user can override.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			attr->sample_type	|= PERF_SAMPLE_PERIOD;
			attr->freq		= 1;
			attr->sample_freq	= opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat)
		attr->inherit_stat = 1;

	if (opts->sample_address) {
		attr->sample_type	|= PERF_SAMPLE_ADDR;
		attr->mmap_data = track;
	}

	if (opts->call_graph)
		attr->sample_type	|= PERF_SAMPLE_CALLCHAIN;

	if (perf_target__has_cpu(&opts->target))
		attr->sample_type	|= PERF_SAMPLE_CPU;

	if (opts->period)
		attr->sample_type	|= PERF_SAMPLE_PERIOD;

	if (!opts->sample_id_all_missing &&
	    (opts->sample_time || !opts->no_inherit ||
	     perf_target__has_cpu(&opts->target)))
		attr->sample_type	|= PERF_SAMPLE_TIME;

	if (opts->raw_samples) {
		attr->sample_type	|= PERF_SAMPLE_TIME;
		attr->sample_type	|= PERF_SAMPLE_RAW;
		attr->sample_type	|= PERF_SAMPLE_CPU;
	}

	if (opts->no_delay) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}
	if (opts->branch_stack) {
		attr->sample_type	|= PERF_SAMPLE_BRANCH_STACK;
		attr->branch_sample_type = opts->branch_stack;
	}

	attr->mmap = track;
	attr->comm = track;

	if (perf_target__none(&opts->target) &&
	    (!opts->group || evsel == first)) {
		attr->enable_on_exec = 1;
	}
}

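/*
 * Allocate the ncpus x nthreads matrix of file descriptors and mark every
 * slot as unopened (-1); users test for a negative fd before touching it.
 */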
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;
	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}

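/*
 * sample_id is an ncpus x nthreads matrix of perf_sample_id entries, and
 * id[] is a flat array of the same capacity for the kernel-assigned ids.
 */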
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}

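/* One zeroed perf_counts_values per cpu, plus the aggregated totals. */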
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}

void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	free(evsel->id);
	evsel->id = NULL;
}

void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}

void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	xyarray__delete(evsel->fd);
	xyarray__delete(evsel->sample_id);
	free(evsel->id);
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	close_cgroup(evsel->cgrp);
	free(evsel->name);
	free(evsel);
}

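/*
 * Read one value for (cpu, thread).  With scale, the read also returns
 * time enabled/running (see the read_format set in perf_evsel__config)
 * and a multiplexed count is extrapolated to val * ena / run, rounded.
 */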
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	if (scale) {
		if (count.run == 0)
			count.val = 0;
		else if (count.run < count.ena)
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;
	return 0;
}

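/*
 * Sum the event's counts over every open (cpu, thread) slot into
 * counts->aggr.  counts->scaled becomes 0 when no scaling was needed,
 * 1 when the value had to be extrapolated and -1 when it never ran.
 */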
int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	evsel->counts->scaled = 0;
	if (scale) {
		if (aggr->run == 0) {
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			evsel->counts->scaled = 1;
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}

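/*
 * Issue one sys_perf_event_open() per (cpu, thread) slot.  With a cgroup
 * event the pid argument carries the cgroup fd together with the
 * PERF_FLAG_PID_CGROUP flag.  On error, everything opened so far is
 * closed and the negated errno is returned.
 */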
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads, bool group,
			      struct xyarray *group_fds)
{
	int cpu, thread;
	unsigned long flags = 0;
	int pid = -1, err;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -ENOMEM;

	if (evsel->cgrp) {
		flags = PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		int group_fd = group_fds ? GROUP_FD(group_fds, cpu) : -1;

		for (thread = 0; thread < threads->nr; thread++) {

			if (!evsel->cgrp)
				pid = threads->map[thread];

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0) {
				err = -errno;
				goto out_close;
			}

			if (group && group_fd == -1)
				group_fd = FD(evsel, cpu, thread);
		}
	}

	return 0;

out_close:
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = threads->nr;
	} while (--cpu >= 0);
	return err;
}

void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel, ncpus, nthreads);
	perf_evsel__free_fd(evsel);
	evsel->fd = NULL;
}

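/*
 * Placeholder maps with a single -1 entry, substituted when the caller
 * supplies no cpu or thread map: -1 effectively tells perf_event_open()
 * "no restriction" for the corresponding dimension.
 */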
static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr	= 1,
	.cpus	= { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};

int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads, bool group,
		     struct xyarray *group_fd)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads, group, group_fd);
}

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus, bool group,
			     struct xyarray *group_fd)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group,
				  group_fd);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads, bool group,
				struct xyarray *group_fd)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group,
				  group_fd);
}

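/*
 * For non-sample records, sample_id_all places the id fields at the tail
 * of the record in the reverse of their PERF_RECORD_SAMPLE order, so this
 * walks the array backwards from the last u64.
 */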
static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
				       struct perf_sample *sample)
{
	const u64 *array = event->sample.array;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_CPU) {
		u32 *p = (u32 *)array;
		sample->cpu = *p;
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u32 *p = (u32 *)array;
		sample->pid = p[0];
		sample->tid = p[1];
	}

	return 0;
}

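/*
 * Bounds check used while walking variable-length fields: would reading
 * "size" bytes at "offset" run past the end of the event record?
 */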
static bool sample_overlap(const union perf_event *event,
			   const void *offset, u64 size)
{
	const void *base = event;

	if (offset + size > base + event->header.size)
		return true;

	return false;
}

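/*
 * Decode an on-file sample into struct perf_sample, consuming the u64
 * slots in the order given by the sample_type bits.  "swapped" requests
 * the extra byte-swap fixups for u32 pairs packed into a u64 (see the
 * union below and git commit 65014ab3).
 */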
int perf_event__parse_sample(const union perf_event *event, u64 type,
			     int sample_size, bool sample_id_all,
			     struct perf_sample *data, bool swapped)
{
	const u64 *array;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union {
		u64 val64;
		u32 val32[2];
	} u;

	memset(data, 0, sizeof(*data));
	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;
	data->period = 1;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!sample_id_all)
			return 0;
		return perf_event__parse_id_sample(event, type, data);
	}

	array = event->sample.array;

	if (sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;

	if (type & PERF_SAMPLE_IP) {
		data->ip = event->ip.ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		data->pid = u.val32[0];
		data->tid = u.val32[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	data->addr = 0;
	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	data->id = -1ULL;
	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {

		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		data->cpu = u.val32[0];
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n");
		return -1;
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		if (sample_overlap(event, array, sizeof(data->callchain->nr)))
			return -EFAULT;

		data->callchain = (struct ip_callchain *)array;

		if (sample_overlap(event, array, data->callchain->nr))
			return -EFAULT;

		array += 1 + data->callchain->nr;
	}

	if (type & PERF_SAMPLE_RAW) {
		const u64 *pdata;

		u.val64 = *array;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		if (sample_overlap(event, array, sizeof(u32)))
			return -EFAULT;

		data->raw_size = u.val32[0];
		pdata = (void *) array + sizeof(u32);

		if (sample_overlap(event, pdata, data->raw_size))
			return -EFAULT;

		data->raw_data = (void *) pdata;

		array = (void *)array + data->raw_size + sizeof(u32);
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		u64 sz;

		data->branch_stack = (struct branch_stack *)array;
		array++; /* nr */

		sz = data->branch_stack->nr * sizeof(struct branch_entry);
		sz /= sizeof(u64);
		array += sz;
	}
	return 0;
}

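/*
 * Inverse of perf_event__parse_sample: lay the fields of "sample" back
 * out into an event record, reapplying the u32-in-u64 swaps when the
 * target endianness differs.
 */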
int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  const struct perf_sample *sample,
				  bool swapped)
{
	u64 *array;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union {
		u64 val64;
		u32 val32[2];
	} u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IP) {
		event->ip.ip = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_event__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}

		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_event__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}

	return 0;
}