/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <byteswap.h>
#include <errno.h>
#include <inttypes.h>
#include <linux/bitops.h>
#include <api/fs/fs.h>
#include <api/fs/tracing_path.h>
#include <traceevent/event-parse.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <sys/ioctl.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <dirent.h>
#include "asm/bug.h"
#include "callchain.h"
#include "cgroup.h"
#include "event.h"
#include "evsel.h"
#include "evlist.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "perf_regs.h"
#include "debug.h"
#include "trace-event.h"
#include "stat.h"
#include "memswap.h"
#include "util/parse-branch-options.h"

#include "sane_ctype.h"

struct perf_missing_features perf_missing_features;

static clockid_t clockid;

static int perf_evsel__no_extra_init(struct perf_evsel *evsel __maybe_unused)
{
	return 0;
}

void __weak test_attr__ready(void) { }
static void perf_evsel__no_extra_fini(struct perf_evsel *evsel __maybe_unused)
{
}

static struct {
	size_t	size;
	int	(*init)(struct perf_evsel *evsel);
	void	(*fini)(struct perf_evsel *evsel);
} perf_evsel__object = {
	.size = sizeof(struct perf_evsel),
	.init = perf_evsel__no_extra_init,
	.fini = perf_evsel__no_extra_fini,
};

int perf_evsel__object_config(size_t object_size,
			      int (*init)(struct perf_evsel *evsel),
			      void (*fini)(struct perf_evsel *evsel))
{

	if (object_size == 0)
		goto set_methods;

	if (perf_evsel__object.size > object_size)
		return -EINVAL;

	perf_evsel__object.size = object_size;

set_methods:
	if (init != NULL)
		perf_evsel__object.init = init;

	if (fini != NULL)
		perf_evsel__object.fini = fini;

	return 0;
}
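
/*
 * Illustrative usage sketch (hypothetical, not from the original file):
 * a tool that embeds a perf_evsel at the start of a larger private
 * struct registers its size and hooks once at startup:
 *
 *	struct my_evsel { struct perf_evsel evsel; int my_state; };
 *	perf_evsel__object_config(sizeof(struct my_evsel),
 *				  my_init, my_fini);
 *
 * my_init()/my_fini() are hypothetical callbacks; every evsel allocated
 * afterwards via zalloc(perf_evsel__object.size) then has room for the
 * extra fields.
 */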

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))

int __perf_evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}
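
/*
 * Worked example (illustrative): for sample_type = PERF_SAMPLE_IP |
 * PERF_SAMPLE_TID | PERF_SAMPLE_TIME, three bits of PERF_SAMPLE_MASK
 * are set, so the fixed-size part of each sample is
 * 3 * sizeof(u64) = 24 bytes.
 */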

/**
 * __perf_evsel__calc_id_pos - calculate id_pos.
 * @sample_type: sample type
 *
 * This function returns the position of the event id (PERF_SAMPLE_ID or
 * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct
 * sample_event.
 */
static int __perf_evsel__calc_id_pos(u64 sample_type)
{
	int idx = 0;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 0;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_IP)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TID)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TIME)
		idx += 1;

	if (sample_type & PERF_SAMPLE_ADDR)
		idx += 1;

	return idx;
}

/**
 * __perf_evsel__calc_is_pos - calculate is_pos.
 * @sample_type: sample type
 *
 * This function returns the position (counting backwards) of the event id
 * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if
 * sample_id_all is used there is an id sample appended to non-sample events.
 */
static int __perf_evsel__calc_is_pos(u64 sample_type)
{
	int idx = 1;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 1;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_CPU)
		idx += 1;

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		idx += 1;

	return idx;
}

void perf_evsel__calc_id_pos(struct perf_evsel *evsel)
{
	evsel->id_pos = __perf_evsel__calc_id_pos(evsel->attr.sample_type);
	evsel->is_pos = __perf_evsel__calc_is_pos(evsel->attr.sample_type);
}
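
/*
 * Worked example (illustrative): for sample_type = PERF_SAMPLE_IP |
 * PERF_SAMPLE_TID | PERF_SAMPLE_ID | PERF_SAMPLE_CPU, id_pos is 2 (the
 * id comes after the IP and TID entries at the front of a sample) and
 * is_pos is also 2 (counting backwards in a sample_id_all trailer, the
 * id sits one entry before the final CPU entry).
 */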

void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
				  enum perf_event_sample_format bit)
{
	if (!(evsel->attr.sample_type & bit)) {
		evsel->attr.sample_type |= bit;
		evsel->sample_size += sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}

void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
				    enum perf_event_sample_format bit)
{
	if (evsel->attr.sample_type & bit) {
		evsel->attr.sample_type &= ~bit;
		evsel->sample_size -= sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}

void perf_evsel__set_sample_id(struct perf_evsel *evsel,
			       bool can_sample_identifier)
{
	if (can_sample_identifier) {
		perf_evsel__reset_sample_bit(evsel, ID);
		perf_evsel__set_sample_bit(evsel, IDENTIFIER);
	} else {
		perf_evsel__set_sample_bit(evsel, ID);
	}
	evsel->attr.read_format |= PERF_FORMAT_ID;
}

/**
 * perf_evsel__is_function_event - Return whether given evsel is a function
 * trace event
 *
 * @evsel - evsel selector to be tested
 *
 * Return %true if event is function trace event
 */
bool perf_evsel__is_function_event(struct perf_evsel *evsel)
{
#define FUNCTION_EVENT "ftrace:function"

	return evsel->name &&
	       !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT));

#undef FUNCTION_EVENT
}

void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx	   = idx;
	evsel->tracking	   = !idx;
	evsel->attr	   = *attr;
	evsel->leader	   = evsel;
	evsel->unit	   = "";
	evsel->scale	   = 1.0;
	evsel->max_events  = ULONG_MAX;
	evsel->evlist	   = NULL;
	evsel->bpf_fd	   = -1;
	INIT_LIST_HEAD(&evsel->node);
	INIT_LIST_HEAD(&evsel->config_terms);
	perf_evsel__object.init(evsel);
	evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
	perf_evsel__calc_id_pos(evsel);
	evsel->cmdline_group_boundary = false;
	evsel->metric_expr   = NULL;
	evsel->metric_name   = NULL;
	evsel->metric_events = NULL;
	evsel->collect_stat  = false;
	evsel->pmu_name      = NULL;
}

struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(perf_evsel__object.size);

	if (!evsel)
		return NULL;
	perf_evsel__init(evsel, attr, idx);

	if (perf_evsel__is_bpf_output(evsel)) {
		evsel->attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					    PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		evsel->attr.sample_period = 1;
	}

	if (perf_evsel__is_clock(evsel)) {
		/*
		 * The evsel->unit points to the static alias->unit
		 * so it's ok to use a static string in here.
		 */
		static const char *unit = "msec";

		evsel->unit = unit;
		evsel->scale = 1e-6;
	}

	return evsel;
}
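
/*
 * Illustrative arithmetic for the clock-event scaling above: cpu-clock
 * and task-clock count in nanoseconds, so a raw value of 2,500,000 with
 * scale = 1e-6 is reported as 2.5 "msec".
 */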

static bool perf_event_can_profile_kernel(void)
{
	return geteuid() == 0 || perf_event_paranoid() == -1;
}

struct perf_evsel *perf_evsel__new_cycles(bool precise)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
		.exclude_kernel	= !perf_event_can_profile_kernel(),
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	if (!precise)
		goto new_event;
	/*
	 * Unnamed union member, not supported as struct member named
	 * initializer in older compilers such as gcc 4.4.7
	 *
	 * Just for probing the precise_ip:
	 */
	attr.sample_period = 1;

	perf_event_attr__set_max_precise_ip(&attr);
	/*
	 * Now let the usual logic to set up the perf_event_attr defaults
	 * to kick in when we return and before perf_evsel__open() is called.
	 */
	attr.sample_period = 0;
new_event:
	evsel = perf_evsel__new(&attr);
	if (evsel == NULL)
		goto out;

	/* use asprintf() because free(evsel) assumes name is allocated */
	if (asprintf(&evsel->name, "cycles%s%s%.*s",
		     (attr.precise_ip || attr.exclude_kernel) ? ":" : "",
		     attr.exclude_kernel ? "u" : "",
		     attr.precise_ip ? attr.precise_ip + 1 : 0, "ppp") < 0)
		goto error_free;
out:
	return evsel;
error_free:
	perf_evsel__delete(evsel);
	evsel = NULL;
	goto out;
}
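
/*
 * Illustrative naming (hypothetical values, not from the original file):
 * with exclude_kernel set and precise_ip == 0 the event is named
 * "cycles:u"; a non-zero precise_ip makes the "%.*s" precision trick
 * print up to three characters of "ppp", e.g. "cycles:ppp" when kernel
 * profiling is allowed.
 */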

/*
 * Returns pointer with encoded error via <linux/err.h> interface.
 */
struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
{
	struct perf_evsel *evsel = zalloc(perf_evsel__object.size);
	int err = -ENOMEM;

	if (evsel == NULL) {
		goto out_err;
	} else {
		struct perf_event_attr attr = {
			.type	       = PERF_TYPE_TRACEPOINT,
			.sample_type   = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					  PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		};

		if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
			goto out_free;

		evsel->tp_format = trace_event__tp_format(sys, name);
		if (IS_ERR(evsel->tp_format)) {
			err = PTR_ERR(evsel->tp_format);
			goto out_free;
		}

		event_attr_init(&attr);
		attr.config = evsel->tp_format->id;
		attr.sample_period = 1;
		perf_evsel__init(evsel, &attr, idx);
	}

	return evsel;

out_free:
	zfree(&evsel->name);
	free(evsel);
out_err:
	return ERR_PTR(err);
}
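
/*
 * Illustrative usage (not from this file): callers normally go through
 * the perf_evsel__newtp() wrapper, e.g.
 *
 *	struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch");
 *	if (IS_ERR(evsel))
 *		return PTR_ERR(evsel);
 *
 * since errors are encoded in the returned pointer per <linux/err.h>.
 */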

const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};

static const char *__perf_evsel__hw_name(u64 config)
{
	if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
		return perf_evsel__hw_names[config];

	return "unknown-hardware";
}

static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
{
	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->attr;
	bool exclude_guest_default = false;

#define MOD_PRINT(context, mod)	do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod);	\
		} } while(0)

	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	if (colon)
		bf[colon - 1] = ':';
	return r;
}
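
/*
 * Worked example (illustrative): for an attr with exclude_user and
 * exclude_hv set but exclude_kernel clear, MOD_PRINT() emits only 'k',
 * so a "cycles" event is rendered as "cycles:k".
 */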

static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"cpu-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
	"dummy",
};

static const char *__perf_evsel__sw_name(u64 config)
{
	if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
		return perf_evsel__sw_names[config];
	return "unknown-software";
}

static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
	int r;

	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

	if (type & HW_BREAKPOINT_R)
		r += scnprintf(bf + r, size - r, "r");

	if (type & HW_BREAKPOINT_W)
		r += scnprintf(bf + r, size - r, "w");

	if (type & HW_BREAKPOINT_X)
		r += scnprintf(bf + r, size - r, "x");

	return r;
}

static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	struct perf_event_attr *attr = &evsel->attr;
	int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				[PERF_EVSEL__MAX_ALIASES] = {
 { "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
 { "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
 { "LLC",	"L2",							},
 { "dTLB",	"d-tlb",	"Data-TLB",				},
 { "iTLB",	"i-tlb",	"Instruction-TLB",			},
 { "branch",	"branches",	"bpu",		"btb",		"bpc",	},
 { "node",								},
};

const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
				   [PERF_EVSEL__MAX_ALIASES] = {
 { "load",	"loads",	"read",					},
 { "store",	"stores",	"write",				},
 { "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
};

const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
				       [PERF_EVSEL__MAX_ALIASES] = {
 { "refs",	"Reference",	"ops",		"access",		},
 { "misses",	"miss",							},
};

#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
 [C(L1D)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(L1I)]	= (CACHE_READ | CACHE_PREFETCH),
 [C(LL)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(DTLB)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(ITLB)]	= (CACHE_READ),
 [C(BPU)]	= (CACHE_READ),
 [C(NODE)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};

bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
{
	if (perf_evsel__hw_cache_stat[type] & COP(op))
		return true;	/* valid */
	else
		return false;	/* invalid */
}

int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size)
{
	if (result) {
		return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
				 perf_evsel__hw_cache_op[op][0],
				 perf_evsel__hw_cache_result[result][0]);
	}

	return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
			 perf_evsel__hw_cache_op[op][1]);
}
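
/*
 * Worked example (illustrative): config = 0 | (0 << 8) | (1 << 16)
 * decodes as type L1D, op READ, result MISS and is named
 * "L1-dcache-load-misses"; with result == 0 the second op alias is used
 * instead, giving "L1-dcache-loads".
 */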

static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
	u8 op, result, type = (config >>  0) & 0xff;
	const char *err = "unknown-ext-hardware-cache-type";

	if (type >= PERF_COUNT_HW_CACHE_MAX)
		goto out_err;

	op = (config >>  8) & 0xff;
	err = "unknown-ext-hardware-cache-op";
	if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
		goto out_err;

	result = (config >> 16) & 0xff;
	err = "unknown-ext-hardware-cache-result";
	if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		goto out_err;

	err = "invalid-cache";
	if (!perf_evsel__is_cache_op_valid(type, op))
		goto out_err;

	return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
	return scnprintf(bf, size, "%s", err);
}

static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

const char *perf_evsel__name(struct perf_evsel *evsel)
{
	char bf[128];

	if (evsel->name)
		return evsel->name;

	switch (evsel->attr.type) {
	case PERF_TYPE_RAW:
		perf_evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		perf_evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		perf_evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	case PERF_TYPE_BREAKPOINT:
		perf_evsel__bp_name(evsel, bf, sizeof(bf));
		break;

	default:
		scnprintf(bf, sizeof(bf), "unknown attr type: %d",
			  evsel->attr.type);
		break;
	}

	evsel->name = strdup(bf);

	return evsel->name ?: "unknown";
}

const char *perf_evsel__group_name(struct perf_evsel *evsel)
{
	return evsel->group_name ?: "anon group";
}

/*
 * Returns the group details for the specified leader,
 * with the following rules.
 *
 *  For record -e '{cycles,instructions}'
 *    'anon group { cycles:u, instructions:u }'
 *
 *  For record -e 'cycles,instructions' and report --group
 *    'cycles:u, instructions:u'
 */
int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
{
	int ret = 0;
	struct perf_evsel *pos;
	const char *group_name = perf_evsel__group_name(evsel);

	if (!evsel->forced_leader)
		ret = scnprintf(buf, size, "%s { ", group_name);

	ret += scnprintf(buf + ret, size - ret, "%s",
			 perf_evsel__name(evsel));

	for_each_group_member(pos, evsel)
		ret += scnprintf(buf + ret, size - ret, ", %s",
				 perf_evsel__name(pos));

	if (!evsel->forced_leader)
		ret += scnprintf(buf + ret, size - ret, " }");

	return ret;
}

static void __perf_evsel__config_callchain(struct perf_evsel *evsel,
					   struct record_opts *opts,
					   struct callchain_param *param)
{
	bool function = perf_evsel__is_function_event(evsel);
	struct perf_event_attr *attr = &evsel->attr;

	perf_evsel__set_sample_bit(evsel, CALLCHAIN);

	attr->sample_max_stack = param->max_stack;

	if (param->record_mode == CALLCHAIN_LBR) {
		if (!opts->branch_stack) {
			if (attr->exclude_user) {
				pr_warning("LBR callstack option is only available "
					   "to get user callchain information. "
					   "Falling back to framepointers.\n");
			} else {
				perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
				attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
							PERF_SAMPLE_BRANCH_CALL_STACK |
							PERF_SAMPLE_BRANCH_NO_CYCLES |
							PERF_SAMPLE_BRANCH_NO_FLAGS;
			}
		} else
			 pr_warning("Cannot use LBR callstack with branch stack. "
				    "Falling back to framepointers.\n");
	}

	if (param->record_mode == CALLCHAIN_DWARF) {
		if (!function) {
			perf_evsel__set_sample_bit(evsel, REGS_USER);
			perf_evsel__set_sample_bit(evsel, STACK_USER);
			attr->sample_regs_user |= PERF_REGS_MASK;
			attr->sample_stack_user = param->dump_size;
			attr->exclude_callchain_user = 1;
		} else {
			pr_info("Cannot use DWARF unwind for function trace event,"
				" falling back to framepointers.\n");
		}
	}

	if (function) {
		pr_info("Disabling user space callchains for function trace event.\n");
		attr->exclude_callchain_user = 1;
	}
}

void perf_evsel__config_callchain(struct perf_evsel *evsel,
				  struct record_opts *opts,
				  struct callchain_param *param)
{
	if (param->enabled)
		return __perf_evsel__config_callchain(evsel, opts, param);
}

static void
perf_evsel__reset_callgraph(struct perf_evsel *evsel,
			    struct callchain_param *param)
{
	struct perf_event_attr *attr = &evsel->attr;

	perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
	if (param->record_mode == CALLCHAIN_LBR) {
		perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type &= ~(PERF_SAMPLE_BRANCH_USER |
					      PERF_SAMPLE_BRANCH_CALL_STACK);
	}
	if (param->record_mode == CALLCHAIN_DWARF) {
		perf_evsel__reset_sample_bit(evsel, REGS_USER);
		perf_evsel__reset_sample_bit(evsel, STACK_USER);
	}
}

static void apply_config_terms(struct perf_evsel *evsel,
			       struct record_opts *opts, bool track)
{
	struct perf_evsel_config_term *term;
	struct list_head *config_terms = &evsel->config_terms;
	struct perf_event_attr *attr = &evsel->attr;
	/* callgraph default */
	struct callchain_param param = {
		.record_mode = callchain_param.record_mode,
	};
	u32 dump_size = 0;
	int max_stack = 0;
	const char *callgraph_buf = NULL;

	list_for_each_entry(term, config_terms, list) {
		switch (term->type) {
		case PERF_EVSEL__CONFIG_TERM_PERIOD:
			if (!(term->weak && opts->user_interval != ULLONG_MAX)) {
				attr->sample_period = term->val.period;
				attr->freq = 0;
				perf_evsel__reset_sample_bit(evsel, PERIOD);
			}
			break;
		case PERF_EVSEL__CONFIG_TERM_FREQ:
			if (!(term->weak && opts->user_freq != UINT_MAX)) {
				attr->sample_freq = term->val.freq;
				attr->freq = 1;
				perf_evsel__set_sample_bit(evsel, PERIOD);
			}
			break;
		case PERF_EVSEL__CONFIG_TERM_TIME:
			if (term->val.time)
				perf_evsel__set_sample_bit(evsel, TIME);
			else
				perf_evsel__reset_sample_bit(evsel, TIME);
			break;
		case PERF_EVSEL__CONFIG_TERM_CALLGRAPH:
			callgraph_buf = term->val.callgraph;
			break;
		case PERF_EVSEL__CONFIG_TERM_BRANCH:
			if (term->val.branch && strcmp(term->val.branch, "no")) {
				perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
				parse_branch_str(term->val.branch,
						 &attr->branch_sample_type);
			} else
				perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
			break;
		case PERF_EVSEL__CONFIG_TERM_STACK_USER:
			dump_size = term->val.stack_user;
			break;
		case PERF_EVSEL__CONFIG_TERM_MAX_STACK:
			max_stack = term->val.max_stack;
			break;
		case PERF_EVSEL__CONFIG_TERM_MAX_EVENTS:
			evsel->max_events = term->val.max_events;
			break;
		case PERF_EVSEL__CONFIG_TERM_INHERIT:
			/*
			 * attr->inherit should have already been set by
			 * perf_evsel__config. If user explicitly set
			 * inherit using config terms, override global
			 * opt->no_inherit setting.
			 */
			attr->inherit = term->val.inherit ? 1 : 0;
			break;
		case PERF_EVSEL__CONFIG_TERM_OVERWRITE:
			attr->write_backward = term->val.overwrite ? 1 : 0;
			break;
		case PERF_EVSEL__CONFIG_TERM_DRV_CFG:
			break;
		default:
			break;
		}
	}

	/* User explicitly set per-event callgraph, clear the old setting and reset. */
	if ((callgraph_buf != NULL) || (dump_size > 0) || max_stack) {
		bool sample_address = false;

		if (max_stack) {
			param.max_stack = max_stack;
			if (callgraph_buf == NULL)
				callgraph_buf = "fp";
		}

		/* parse callgraph parameters */
		if (callgraph_buf != NULL) {
			if (!strcmp(callgraph_buf, "no")) {
				param.enabled = false;
				param.record_mode = CALLCHAIN_NONE;
			} else {
				param.enabled = true;
				if (parse_callchain_record(callgraph_buf, &param)) {
					pr_err("per-event callgraph setting for %s failed. "
					       "Apply callgraph global setting for it\n",
					       evsel->name);
					return;
				}
				if (param.record_mode == CALLCHAIN_DWARF)
					sample_address = true;
			}
		}
		if (dump_size > 0) {
			dump_size = round_up(dump_size, sizeof(u64));
			param.dump_size = dump_size;
		}

		/* If global callgraph set, clear it */
		if (callchain_param.enabled)
			perf_evsel__reset_callgraph(evsel, &callchain_param);

		/* set perf-event callgraph */
		if (param.enabled) {
			if (sample_address) {
				perf_evsel__set_sample_bit(evsel, ADDR);
				perf_evsel__set_sample_bit(evsel, DATA_SRC);
				evsel->attr.mmap_data = track;
			}
			perf_evsel__config_callchain(evsel, opts, &param);
		}
	}
}

static bool is_dummy_event(struct perf_evsel *evsel)
{
	return (evsel->attr.type == PERF_TYPE_SOFTWARE) &&
	       (evsel->attr.config == PERF_COUNT_SW_DUMMY);
}

/*
 * The enable_on_exec/disabled value strategy:
 *
 *  1) For any type of traced program:
 *    - all independent events and group leaders are disabled
 *    - all group members are enabled
 *
 *     Group members are ruled by group leaders. They need to
 *     be enabled, because the group scheduling relies on that.
 *
 *  2) For traced programs executed by perf:
 *     - all independent events and group leaders have
 *       enable_on_exec set
 *     - we don't specifically enable or disable any event during
 *       the record command
 *
 *     Independent events and group leaders are initially disabled
 *     and get enabled by exec. Group members are ruled by group
 *     leaders as stated in 1).
 *
 *  3) For traced programs attached by perf (pid/tid):
 *     - we specifically enable or disable all events during
 *       the record command
 *
 *     When attaching events to an already running traced process we
 *     enable/disable events specifically, as there's no
 *     initial traced exec call.
 */
void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
			struct callchain_param *callchain)
{
	struct perf_evsel *leader = evsel->leader;
	struct perf_event_attr *attr = &evsel->attr;
	int track = evsel->tracking;
	bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;

	attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;
	attr->write_backward = opts->overwrite ? 1 : 0;

	perf_evsel__set_sample_bit(evsel, IP);
	perf_evsel__set_sample_bit(evsel, TID);

	if (evsel->sample_read) {
		perf_evsel__set_sample_bit(evsel, READ);

		/*
		 * We need ID even in case of single event, because
		 * PERF_SAMPLE_READ processes ID specific data.
		 */
		perf_evsel__set_sample_id(evsel, false);

		/*
		 * Apply group format only if we belong to a group
		 * with more than one member.
		 */
		if (leader->nr_members > 1) {
			attr->read_format |= PERF_FORMAT_GROUP;
			attr->inherit = 0;
		}
	}

	/*
	 * We default some events to have a default interval. But keep
	 * it a weak assumption overridable by the user.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX ||
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			perf_evsel__set_sample_bit(evsel, PERIOD);
			attr->freq		= 1;
			attr->sample_freq	= opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	/*
	 * Disable sampling for all group members other
	 * than leader in case leader 'leads' the sampling.
	 */
	if ((leader != evsel) && leader->sample_read) {
		attr->freq           = 0;
		attr->sample_freq    = 0;
		attr->sample_period  = 0;
		attr->write_backward = 0;
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat) {
		evsel->attr.read_format |=
			PERF_FORMAT_TOTAL_TIME_ENABLED |
			PERF_FORMAT_TOTAL_TIME_RUNNING |
			PERF_FORMAT_ID;
		attr->inherit_stat = 1;
	}

	if (opts->sample_address) {
		perf_evsel__set_sample_bit(evsel, ADDR);
		attr->mmap_data = track;
	}

	/*
	 * We don't allow user space callchains for function trace
	 * events, due to issues with page faults while tracing the page
	 * fault handler and its overall tricky nature.
	 */
	if (perf_evsel__is_function_event(evsel))
		evsel->attr.exclude_callchain_user = 1;

	if (callchain && callchain->enabled && !evsel->no_aux_samples)
		perf_evsel__config_callchain(evsel, opts, callchain);

	if (opts->sample_intr_regs) {
		attr->sample_regs_intr = opts->sample_intr_regs;
		perf_evsel__set_sample_bit(evsel, REGS_INTR);
	}

	if (opts->sample_user_regs) {
		attr->sample_regs_user |= opts->sample_user_regs;
		perf_evsel__set_sample_bit(evsel, REGS_USER);
	}

	if (target__has_cpu(&opts->target) || opts->sample_cpu)
		perf_evsel__set_sample_bit(evsel, CPU);

	/*
	 * When the user explicitly disabled time don't force it here.
	 */
	if (opts->sample_time &&
	    (!perf_missing_features.sample_id_all &&
	    (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu ||
	     opts->sample_time_set)))
		perf_evsel__set_sample_bit(evsel, TIME);

	if (opts->raw_samples && !evsel->no_aux_samples) {
		perf_evsel__set_sample_bit(evsel, TIME);
		perf_evsel__set_sample_bit(evsel, RAW);
		perf_evsel__set_sample_bit(evsel, CPU);
	}

	if (opts->sample_address)
		perf_evsel__set_sample_bit(evsel, DATA_SRC);

	if (opts->sample_phys_addr)
		perf_evsel__set_sample_bit(evsel, PHYS_ADDR);

	if (opts->no_buffering) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}
	if (opts->branch_stack && !evsel->no_aux_samples) {
		perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type = opts->branch_stack;
	}

	if (opts->sample_weight)
		perf_evsel__set_sample_bit(evsel, WEIGHT);

	attr->task  = track;
	attr->mmap  = track;
	attr->mmap2 = track && !perf_missing_features.mmap2;
	attr->comm  = track;
	attr->ksymbol = track && !perf_missing_features.ksymbol;
	attr->bpf_event = track && opts->bpf_event &&
		!perf_missing_features.bpf_event;

	if (opts->record_namespaces)
		attr->namespaces  = track;

	if (opts->record_switch_events)
		attr->context_switch = track;

	if (opts->sample_transaction)
		perf_evsel__set_sample_bit(evsel, TRANSACTION);

	if (opts->running_time) {
		evsel->attr.read_format |=
			PERF_FORMAT_TOTAL_TIME_ENABLED |
			PERF_FORMAT_TOTAL_TIME_RUNNING;
	}

	/*
	 * XXX see the function comment above
	 *
	 * Disabling only independent events or group leaders,
	 * keeping group members enabled.
	 */
	if (perf_evsel__is_group_leader(evsel))
		attr->disabled = 1;

	/*
	 * Setting enable_on_exec for independent events and
	 * group leaders for traced programs executed by perf.
	 */
	if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel) &&
		!opts->initial_delay)
		attr->enable_on_exec = 1;

	if (evsel->immediate) {
		attr->disabled = 0;
		attr->enable_on_exec = 0;
	}

	clockid = opts->clockid;
	if (opts->use_clockid) {
		attr->use_clockid = 1;
		attr->clockid = opts->clockid;
	}

	if (evsel->precise_max)
		perf_event_attr__set_max_precise_ip(attr);

	if (opts->all_user) {
		attr->exclude_kernel = 1;
		attr->exclude_user   = 0;
	}

	if (opts->all_kernel) {
		attr->exclude_kernel = 0;
		attr->exclude_user   = 1;
	}

	if (evsel->own_cpus || evsel->unit)
		evsel->attr.read_format |= PERF_FORMAT_ID;

	/*
	 * Apply event specific term settings; these override
	 * any global configuration.
	 */
	apply_config_terms(evsel, opts, track);

	evsel->ignore_missing_thread = opts->ignore_missing_thread;

	/* The --period option takes precedence. */
	if (opts->period_set) {
		if (opts->period)
			perf_evsel__set_sample_bit(evsel, PERIOD);
		else
			perf_evsel__reset_sample_bit(evsel, PERIOD);
	}

	/*
	 * For initial_delay, a dummy event is added implicitly.
	 * The software event will error out with -EOPNOTSUPP
	 * if the BRANCH_STACK bit is set.
	 */
	if (opts->initial_delay && is_dummy_event(evsel))
		perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
}

static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (evsel->system_wide)
		nthreads = 1;

	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		int cpu, thread;
		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}

static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
			  int ioc,  void *arg)
{
	int cpu, thread;

	for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
		for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
			int fd = FD(evsel, cpu, thread),
			    err = ioctl(fd, ioc, arg);

			if (err)
				return err;
		}
	}

	return 0;
}

int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter)
{
	return perf_evsel__run_ioctl(evsel,
				     PERF_EVENT_IOC_SET_FILTER,
				     (void *)filter);
}

int perf_evsel__set_filter(struct perf_evsel *evsel, const char *filter)
{
	char *new_filter = strdup(filter);

	if (new_filter != NULL) {
		free(evsel->filter);
		evsel->filter = new_filter;
		return 0;
	}

	return -1;
}

static int perf_evsel__append_filter(struct perf_evsel *evsel,
				     const char *fmt, const char *filter)
{
	char *new_filter;

	if (evsel->filter == NULL)
		return perf_evsel__set_filter(evsel, filter);

	if (asprintf(&new_filter, fmt, evsel->filter, filter) > 0) {
		free(evsel->filter);
		evsel->filter = new_filter;
		return 0;
	}

	return -1;
}

int perf_evsel__append_tp_filter(struct perf_evsel *evsel, const char *filter)
{
	return perf_evsel__append_filter(evsel, "(%s) && (%s)", filter);
}
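
/*
 * Worked example (illustrative): if evsel->filter is already
 * "prev_pid==0", perf_evsel__append_tp_filter(evsel, "next_pid==0")
 * produces the combined filter "(prev_pid==0) && (next_pid==0)".
 */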

int perf_evsel__append_addr_filter(struct perf_evsel *evsel, const char *filter)
{
	return perf_evsel__append_filter(evsel, "%s,%s", filter);
}

int perf_evsel__enable(struct perf_evsel *evsel)
{
	int err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, 0);

	if (!err)
		evsel->disabled = false;

	return err;
}

int perf_evsel__disable(struct perf_evsel *evsel)
{
	int err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, 0);
	/*
	 * We mark it disabled here so that tools that disable an event can
	 * ignore events after they disable it. I.e. the ring buffer may
	 * already have a few more events queued up before the kernel got
	 * the stop request.
	 */
	if (!err)
		evsel->disabled = true;

	return err;
}

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (ncpus == 0 || nthreads == 0)
		return 0;

	if (evsel->system_wide)
		nthreads = 1;

	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

static void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	zfree(&evsel->id);
}

static void perf_evsel__free_config_terms(struct perf_evsel *evsel)
{
	struct perf_evsel_config_term *term, *h;

	list_for_each_entry_safe(term, h, &evsel->config_terms, list) {
		list_del(&term->list);
		free(term);
	}
}

void perf_evsel__close_fd(struct perf_evsel *evsel)
{
	int cpu, thread;

	for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++)
		for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}

void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	assert(evsel->evlist == NULL);
	perf_evsel__free_fd(evsel);
	perf_evsel__free_id(evsel);
	perf_evsel__free_config_terms(evsel);
	cgroup__put(evsel->cgrp);
	cpu_map__put(evsel->cpus);
	cpu_map__put(evsel->own_cpus);
	thread_map__put(evsel->threads);
	zfree(&evsel->group_name);
	zfree(&evsel->name);
	perf_evsel__object.fini(evsel);
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	free(evsel);
}

void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, int thread,
				struct perf_counts_values *count)
{
	struct perf_counts_values tmp;

	if (!evsel->prev_raw_counts)
		return;

	if (cpu == -1) {
		tmp = evsel->prev_raw_counts->aggr;
		evsel->prev_raw_counts->aggr = *count;
	} else {
		tmp = *perf_counts(evsel->prev_raw_counts, cpu, thread);
		*perf_counts(evsel->prev_raw_counts, cpu, thread) = *count;
	}

	count->val = count->val - tmp.val;
	count->ena = count->ena - tmp.ena;
	count->run = count->run - tmp.run;
}

void perf_counts_values__scale(struct perf_counts_values *count,
			       bool scale, s8 *pscaled)
{
	s8 scaled = 0;

	if (scale) {
		if (count->run == 0) {
			scaled = -1;
			count->val = 0;
		} else if (count->run < count->ena) {
			scaled = 1;
			count->val = (u64)((double) count->val * count->ena / count->run + 0.5);
		}
	} else
		count->ena = count->run = 0;

	if (pscaled)
		*pscaled = scaled;
}
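
/*
 * Worked example (illustrative): with val = 1000, ena = 200 and
 * run = 100 the counter was scheduled in for only part of its enabled
 * time, so the scaled value is 1000 * 200 / 100 = 2000 and *pscaled is
 * set to 1; run == 0 instead zeroes the value and reports -1.
 */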

static int perf_evsel__read_size(struct perf_evsel *evsel)
{
	u64 read_format = evsel->attr.read_format;
	int entry = sizeof(u64); /* value */
	int size = 0;
	int nr = 1;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (read_format & PERF_FORMAT_GROUP) {
		nr = evsel->nr_members;
		size += sizeof(u64);
	}

	size += entry * nr;
	return size;
}
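
/*
 * Worked example (illustrative): for a group leader with read_format =
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_ID | PERF_FORMAT_GROUP
 * and nr_members == 2, each entry is 16 bytes (value + id), so the size
 * is 8 (time_enabled) + 8 (nr) + 2 * 16 = 48 bytes.
 */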

int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
		     struct perf_counts_values *count)
{
	size_t size = perf_evsel__read_size(evsel);

	memset(count, 0, sizeof(*count));

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (readn(FD(evsel, cpu, thread), count->values, size) <= 0)
		return -errno;

	return 0;
}

static int
perf_evsel__read_one(struct perf_evsel *evsel, int cpu, int thread)
{
	struct perf_counts_values *count = perf_counts(evsel->counts, cpu, thread);

	return perf_evsel__read(evsel, cpu, thread, count);
}

static void
perf_evsel__set_count(struct perf_evsel *counter, int cpu, int thread,
		      u64 val, u64 ena, u64 run)
{
	struct perf_counts_values *count;

	count = perf_counts(counter->counts, cpu, thread);

	count->val    = val;
	count->ena    = ena;
	count->run    = run;
	count->loaded = true;
}

static int
perf_evsel__process_group_data(struct perf_evsel *leader,
			       int cpu, int thread, u64 *data)
{
	u64 read_format = leader->attr.read_format;
	struct sample_read_value *v;
	u64 nr, ena = 0, run = 0, i;

	nr = *data++;

	if (nr != (u64) leader->nr_members)
		return -EINVAL;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		ena = *data++;

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		run = *data++;

	v = (struct sample_read_value *) data;

	perf_evsel__set_count(leader, cpu, thread,
			      v[0].value, ena, run);

	for (i = 1; i < nr; i++) {
		struct perf_evsel *counter;

		counter = perf_evlist__id2evsel(leader->evlist, v[i].id);
		if (!counter)
			return -EINVAL;

		perf_evsel__set_count(counter, cpu, thread,
				      v[i].value, ena, run);
	}

	return 0;
}

static int
perf_evsel__read_group(struct perf_evsel *leader, int cpu, int thread)
{
	struct perf_stat_evsel *ps = leader->stats;
	u64 read_format = leader->attr.read_format;
	int size = perf_evsel__read_size(leader);
	u64 *data = ps->group_data;

	if (!(read_format & PERF_FORMAT_ID))
		return -EINVAL;

	if (!perf_evsel__is_group_leader(leader))
		return -EINVAL;

	if (!data) {
		data = zalloc(size);
		if (!data)
			return -ENOMEM;

		ps->group_data = data;
	}

	if (FD(leader, cpu, thread) < 0)
		return -EINVAL;

	if (readn(FD(leader, cpu, thread), data, size) <= 0)
		return -errno;

	return perf_evsel__process_group_data(leader, cpu, thread, data);
}

int perf_evsel__read_counter(struct perf_evsel *evsel, int cpu, int thread)
{
	u64 read_format = evsel->attr.read_format;

	if (read_format & PERF_FORMAT_GROUP)
		return perf_evsel__read_group(evsel, cpu, thread);
	else
		return perf_evsel__read_one(evsel, cpu, thread);
}

int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1, thread + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) <= 0)
		return -errno;

	perf_evsel__compute_deltas(evsel, cpu, thread, &count);
	perf_counts_values__scale(&count, scale, NULL);
	*perf_counts(evsel->counts, cpu, thread) = count;
	return 0;
}

static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
{
	struct perf_evsel *leader = evsel->leader;
	int fd;

	if (perf_evsel__is_group_leader(evsel))
		return -1;

	/*
	 * Leader must be already processed/open,
	 * if not it's a bug.
	 */
	BUG_ON(!leader->fd);

	fd = FD(leader, cpu, thread);
	BUG_ON(fd == -1);

	return fd;
}

struct bit_names {
	int bit;
	const char *name;
};

static void __p_bits(char *buf, size_t size, u64 value, struct bit_names *bits)
{
	bool first_bit = true;
	int i = 0;

	do {
		if (value & bits[i].bit) {
			buf += scnprintf(buf, size, "%s%s", first_bit ? "" : "|", bits[i].name);
			first_bit = false;
		}
	} while (bits[++i].name != NULL);
}

static void __p_sample_type(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_SAMPLE_##n, #n }
	struct bit_names bits[] = {
		bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
		bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
		bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
		bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
		bit_name(IDENTIFIER), bit_name(REGS_INTR), bit_name(DATA_SRC),
		bit_name(WEIGHT), bit_name(PHYS_ADDR),
		{ .name = NULL, }
	};
#undef bit_name
	__p_bits(buf, size, value, bits);
}
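
/*
 * Illustrative output (not from the original file): a mask of
 * PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_PERIOD is rendered by
 * __p_sample_type() as "IP|TID|PERIOD" when perf_event_attr is dumped
 * with -vv.
 */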

static void __p_branch_sample_type(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_SAMPLE_BRANCH_##n, #n }
	struct bit_names bits[] = {
		bit_name(USER), bit_name(KERNEL), bit_name(HV), bit_name(ANY),
		bit_name(ANY_CALL), bit_name(ANY_RETURN), bit_name(IND_CALL),
		bit_name(ABORT_TX), bit_name(IN_TX), bit_name(NO_TX),
		bit_name(COND), bit_name(CALL_STACK), bit_name(IND_JUMP),
		bit_name(CALL), bit_name(NO_FLAGS), bit_name(NO_CYCLES),
		{ .name = NULL, }
	};
#undef bit_name
	__p_bits(buf, size, value, bits);
}

static void __p_read_format(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_FORMAT_##n, #n }
	struct bit_names bits[] = {
		bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
		bit_name(ID), bit_name(GROUP),
		{ .name = NULL, }
	};
#undef bit_name
	__p_bits(buf, size, value, bits);
}

#define BUF_SIZE		1024

#define p_hex(val)		snprintf(buf, BUF_SIZE, "%#"PRIx64, (uint64_t)(val))
#define p_unsigned(val)		snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val))
#define p_signed(val)		snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val))
#define p_sample_type(val)	__p_sample_type(buf, BUF_SIZE, val)
#define p_branch_sample_type(val) __p_branch_sample_type(buf, BUF_SIZE, val)
#define p_read_format(val)	__p_read_format(buf, BUF_SIZE, val)

#define PRINT_ATTRn(_n, _f, _p)				\
do {							\
	if (attr->_f) {					\
		_p(attr->_f);				\
		ret += attr__fprintf(fp, _n, buf, priv);\
	}						\
} while (0)

#define PRINT_ATTRf(_f, _p)	PRINT_ATTRn(#_f, _f, _p)

int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
			     attr__fprintf_f attr__fprintf, void *priv)
{
	char buf[BUF_SIZE];
	int ret = 0;

	PRINT_ATTRf(type, p_unsigned);
	PRINT_ATTRf(size, p_unsigned);
	PRINT_ATTRf(config, p_hex);
	PRINT_ATTRn("{ sample_period, sample_freq }", sample_period, p_unsigned);
	PRINT_ATTRf(sample_type, p_sample_type);
	PRINT_ATTRf(read_format, p_read_format);

	PRINT_ATTRf(disabled, p_unsigned);
	PRINT_ATTRf(inherit, p_unsigned);
	PRINT_ATTRf(pinned, p_unsigned);
	PRINT_ATTRf(exclusive, p_unsigned);
	PRINT_ATTRf(exclude_user, p_unsigned);
	PRINT_ATTRf(exclude_kernel, p_unsigned);
	PRINT_ATTRf(exclude_hv, p_unsigned);
	PRINT_ATTRf(exclude_idle, p_unsigned);
	PRINT_ATTRf(mmap, p_unsigned);
	PRINT_ATTRf(comm, p_unsigned);
	PRINT_ATTRf(freq, p_unsigned);
	PRINT_ATTRf(inherit_stat, p_unsigned);
	PRINT_ATTRf(enable_on_exec, p_unsigned);
	PRINT_ATTRf(task, p_unsigned);
	PRINT_ATTRf(watermark, p_unsigned);
	PRINT_ATTRf(precise_ip, p_unsigned);
	PRINT_ATTRf(mmap_data, p_unsigned);
	PRINT_ATTRf(sample_id_all, p_unsigned);
	PRINT_ATTRf(exclude_host, p_unsigned);
	PRINT_ATTRf(exclude_guest, p_unsigned);
	PRINT_ATTRf(exclude_callchain_kernel, p_unsigned);
	PRINT_ATTRf(exclude_callchain_user, p_unsigned);
	PRINT_ATTRf(mmap2, p_unsigned);
	PRINT_ATTRf(comm_exec, p_unsigned);
	PRINT_ATTRf(use_clockid, p_unsigned);
	PRINT_ATTRf(context_switch, p_unsigned);
	PRINT_ATTRf(write_backward, p_unsigned);
	PRINT_ATTRf(namespaces, p_unsigned);
	PRINT_ATTRf(ksymbol, p_unsigned);
	PRINT_ATTRf(bpf_event, p_unsigned);

	PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
	PRINT_ATTRf(bp_type, p_unsigned);
	PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex);
	PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex);
	PRINT_ATTRf(branch_sample_type, p_branch_sample_type);
	PRINT_ATTRf(sample_regs_user, p_hex);
	PRINT_ATTRf(sample_stack_user, p_unsigned);
	PRINT_ATTRf(clockid, p_signed);
	PRINT_ATTRf(sample_regs_intr, p_hex);
	PRINT_ATTRf(aux_watermark, p_unsigned);
	PRINT_ATTRf(sample_max_stack, p_unsigned);

	return ret;
}

static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
				void *priv __maybe_unused)
{
	return fprintf(fp, "  %-32s %s\n", name, val);
}

static void perf_evsel__remove_fd(struct perf_evsel *pos,
				  int nr_cpus, int nr_threads,
				  int thread_idx)
{
	for (int cpu = 0; cpu < nr_cpus; cpu++)
		for (int thread = thread_idx; thread < nr_threads - 1; thread++)
			FD(pos, cpu, thread) = FD(pos, cpu, thread + 1);
}

static int update_fds(struct perf_evsel *evsel,
		      int nr_cpus, int cpu_idx,
		      int nr_threads, int thread_idx)
{
	struct perf_evsel *pos;

	if (cpu_idx >= nr_cpus || thread_idx >= nr_threads)
		return -EINVAL;

	evlist__for_each_entry(evsel->evlist, pos) {
		nr_cpus = pos != evsel ? nr_cpus : cpu_idx;

		perf_evsel__remove_fd(pos, nr_cpus, nr_threads, thread_idx);

		/*
		 * Since fds for next evsel has not been created,
		 * there is no need to iterate whole event list.
		 */
		if (pos == evsel)
			break;
	}
	return 0;
}
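
/*
 * Worked example (illustrative): if thread index 1 of 3 vanishes with
 * -ESRCH, update_fds() shifts FD(pos, cpu, 2) into slot 1 for every
 * evsel opened so far (up to and including the failing one), keeping the
 * fd table aligned with the thread_map that thread_map__remove() is
 * about to compact.
 */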

static bool ignore_missing_thread(struct perf_evsel *evsel,
				  int nr_cpus, int cpu,
				  struct thread_map *threads,
				  int thread, int err)
{
	pid_t ignore_pid = thread_map__pid(threads, thread);

	if (!evsel->ignore_missing_thread)
		return false;

	/* The system wide setup does not work with threads. */
	if (evsel->system_wide)
		return false;

	/* The -ESRCH is perf event syscall errno for pid's not found. */
	if (err != -ESRCH)
		return false;

	/* If there's only one thread, let it fail. */
	if (threads->nr == 1)
		return false;

	/*
	 * We should remove fd for missing_thread first
	 * because thread_map__remove() will decrease threads->nr.
	 */
	if (update_fds(evsel, nr_cpus, cpu, threads->nr, thread))
		return false;

	if (thread_map__remove(threads, thread))
		return false;

	pr_warning("WARNING: Ignored open failure for pid %d\n",
		   ignore_pid);
	return true;
}

int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads)
{
	int cpu, thread, nthreads;
	unsigned long flags = PERF_FLAG_FD_CLOEXEC;
	int pid = -1, err;
	enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;

	if (perf_missing_features.write_backward && evsel->attr.write_backward)
		return -EINVAL;

	if (cpus == NULL) {
		static struct cpu_map *empty_cpu_map;

		if (empty_cpu_map == NULL) {
			empty_cpu_map = cpu_map__dummy_new();
			if (empty_cpu_map == NULL)
				return -ENOMEM;
		}

		cpus = empty_cpu_map;
	}

	if (threads == NULL) {
		static struct thread_map *empty_thread_map;

		if (empty_thread_map == NULL) {
			empty_thread_map = thread_map__new_by_tid(-1);
			if (empty_thread_map == NULL)
				return -ENOMEM;
		}

		threads = empty_thread_map;
	}

	if (evsel->system_wide)
		nthreads = 1;
	else
		nthreads = threads->nr;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, nthreads) < 0)
		return -ENOMEM;

	if (evsel->cgrp) {
		flags |= PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

fallback_missing_features:
	if (perf_missing_features.clockid_wrong)
		evsel->attr.clockid = CLOCK_MONOTONIC; /* should always work */
	if (perf_missing_features.clockid) {
		evsel->attr.use_clockid = 0;
		evsel->attr.clockid = 0;
	}
	if (perf_missing_features.cloexec)
		flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
	if (perf_missing_features.mmap2)
		evsel->attr.mmap2 = 0;
	if (perf_missing_features.exclude_guest)
		evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
	if (perf_missing_features.lbr_flags)
		evsel->attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS |
				     PERF_SAMPLE_BRANCH_NO_CYCLES);
	if (perf_missing_features.group_read && evsel->attr.inherit)
		evsel->attr.read_format &= ~(PERF_FORMAT_GROUP|PERF_FORMAT_ID);
	if (perf_missing_features.ksymbol)
		evsel->attr.ksymbol = 0;
	if (perf_missing_features.bpf_event)
		evsel->attr.bpf_event = 0;
retry_sample_id:
	if (perf_missing_features.sample_id_all)
		evsel->attr.sample_id_all = 0;

	if (verbose >= 2) {
		fprintf(stderr, "%.60s\n", graph_dotted_line);
		fprintf(stderr, "perf_event_attr:\n");
		perf_event_attr__fprintf(stderr, &evsel->attr, __open_attr__fprintf, NULL);
		fprintf(stderr, "%.60s\n", graph_dotted_line);
	}
	for (cpu = 0; cpu < cpus->nr; cpu++) {

		for (thread = 0; thread < nthreads; thread++) {
			int fd, group_fd;

			if (!evsel->cgrp && !evsel->system_wide)
				pid = thread_map__pid(threads, thread);

			group_fd = get_group_fd(evsel, cpu, thread);
retry_open:
			pr_debug2("sys_perf_event_open: pid %d  cpu %d  group_fd %d  flags %#lx",
				  pid, cpus->map[cpu], group_fd, flags);

			test_attr__ready();

			fd = sys_perf_event_open(&evsel->attr, pid, cpus->map[cpu],
						 group_fd, flags);

			FD(evsel, cpu, thread) = fd;

			if (fd < 0) {
				err = -errno;

				if (ignore_missing_thread(evsel, cpus->nr, cpu, threads, thread, err)) {
					/*
					 * We just removed 1 thread, so take a step
					 * back on thread index and lower the upper
					 * nthreads limit.
					 */
					nthreads--;
					thread--;

					/* ... and pretend like nothing have happened. */
					err = 0;
					continue;
				}

1871
				pr_debug2("\nsys_perf_event_open failed, error %d\n",
					  err);
				goto try_fallback;
			}

			pr_debug2(" = %d\n", fd);

			if (evsel->bpf_fd >= 0) {
				int evt_fd = fd;
				int bpf_fd = evsel->bpf_fd;

				err = ioctl(evt_fd,
					    PERF_EVENT_IOC_SET_BPF,
					    bpf_fd);
				if (err && errno != EEXIST) {
					pr_err("failed to attach bpf fd %d: %s\n",
					       bpf_fd, strerror(errno));
					err = -EINVAL;
					goto out_close;
				}
			}

			set_rlimit = NO_CHANGE;

			/*
			 * If we succeeded but had to kill clockid, fail and
			 * have perf_evsel__open_strerror() print us a nice
			 * error.
			 */
			if (perf_missing_features.clockid ||
			    perf_missing_features.clockid_wrong) {
				err = -EINVAL;
				goto out_close;
			}
		}
	}

	return 0;

try_fallback:
	/*
	 * perf stat needs between 5 and 22 fds per CPU. When we run out
	 * of them, try to increase the limits.
	 */
	if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
		struct rlimit l;
		int old_errno = errno;

		if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
			if (set_rlimit == NO_CHANGE)
				l.rlim_cur = l.rlim_max;
			else {
				l.rlim_cur = l.rlim_max + 1000;
				l.rlim_max = l.rlim_cur;
			}
			if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
				set_rlimit++;
				errno = old_errno;
				goto retry_open;
			}
		}
		errno = old_errno;
	}

	if (err != -EINVAL || cpu > 0 || thread > 0)
		goto out_close;

	/*
	 * Must probe features in the order they were added to the
	 * perf_event_attr interface.
	 */
	if (!perf_missing_features.bpf_event && evsel->attr.bpf_event) {
		perf_missing_features.bpf_event = true;
		pr_debug2("switching off bpf_event\n");
		goto fallback_missing_features;
	} else if (!perf_missing_features.ksymbol && evsel->attr.ksymbol) {
		perf_missing_features.ksymbol = true;
		pr_debug2("switching off ksymbol\n");
		goto fallback_missing_features;
	} else if (!perf_missing_features.write_backward && evsel->attr.write_backward) {
		perf_missing_features.write_backward = true;
		pr_debug2("switching off write_backward\n");
		goto out_close;
	} else if (!perf_missing_features.clockid_wrong && evsel->attr.use_clockid) {
		perf_missing_features.clockid_wrong = true;
		pr_debug2("switching off clockid\n");
		goto fallback_missing_features;
	} else if (!perf_missing_features.clockid && evsel->attr.use_clockid) {
		perf_missing_features.clockid = true;
		pr_debug2("switching off use_clockid\n");
		goto fallback_missing_features;
	} else if (!perf_missing_features.cloexec && (flags & PERF_FLAG_FD_CLOEXEC)) {
		perf_missing_features.cloexec = true;
		pr_debug2("switching off cloexec flag\n");
		goto fallback_missing_features;
	} else if (!perf_missing_features.mmap2 && evsel->attr.mmap2) {
		perf_missing_features.mmap2 = true;
		pr_debug2("switching off mmap2\n");
		goto fallback_missing_features;
	} else if (!perf_missing_features.exclude_guest &&
		   (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
		perf_missing_features.exclude_guest = true;
		pr_debug2("switching off exclude_guest, exclude_host\n");
		goto fallback_missing_features;
	} else if (!perf_missing_features.sample_id_all) {
		perf_missing_features.sample_id_all = true;
		pr_debug2("switching off sample_id_all\n");
		goto retry_sample_id;
	} else if (!perf_missing_features.lbr_flags &&
			(evsel->attr.branch_sample_type &
			 (PERF_SAMPLE_BRANCH_NO_CYCLES |
			  PERF_SAMPLE_BRANCH_NO_FLAGS))) {
		perf_missing_features.lbr_flags = true;
		pr_debug2("switching off branch sample type no (cycles/flags)\n");
		goto fallback_missing_features;
	} else if (!perf_missing_features.group_read &&
		    evsel->attr.inherit &&
		   (evsel->attr.read_format & PERF_FORMAT_GROUP) &&
		   perf_evsel__is_group_leader(evsel)) {
		perf_missing_features.group_read = true;
		pr_debug2("switching off group read\n");
		goto fallback_missing_features;
	}
out_close:
	if (err)
		threads->err_thread = thread;

	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = nthreads;
	} while (--cpu >= 0);
	return err;
}

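/*
 * Close all per-event fds and release the fd array.  Safe to call on an
 * evsel that was never opened.
 */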
void perf_evsel__close(struct perf_evsel *evsel)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel);
	perf_evsel__free_fd(evsel);
}

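/*
 * Convenience wrappers around perf_evsel__open(): the NULL dimension is
 * replaced there by a dummy map.  A hypothetical caller counting only on
 * the threads it cares about would do e.g.
 * perf_evsel__open_per_thread(evsel, threads).
 */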
int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus)
{
	return perf_evsel__open(evsel, cpus, NULL);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads)
{
	return perf_evsel__open(evsel, NULL, threads);
}

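/*
 * Parse the sample_id_all trailer of a non-sample event: start at the
 * last u64 of the event and walk backwards through the optional fields,
 * which are laid out in the reverse of their PERF_RECORD_SAMPLE order.
 */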
static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
				       const union perf_event *event,
				       struct perf_sample *sample)
{
	u64 type = evsel->attr.sample_type;
	const u64 *array = event->sample.array;
	bool swapped = evsel->needs_swap;
	union u64_swap u;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		sample->cpu = u.val32[0];
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		sample->pid = u.val32[0];
		sample->tid = u.val32[1];
		array--;
	}

	return 0;
}

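/*
 * Bounds checking for the pointer walk in perf_evsel__parse_sample(): a
 * field may neither claim more than max_size bytes nor extend past the
 * end of the event.
 */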
static inline bool overflow(const void *endp, u16 max_size, const void *offset,
			    u64 size)
{
	return size > max_size || offset + size > endp;
}

#define OVERFLOW_CHECK(offset, size, max_size)				\
	do {								\
		if (overflow(endp, (max_size), (offset), (size)))	\
			return -EFAULT;					\
	} while (0)

#define OVERFLOW_CHECK_u64(offset) \
	OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))

static int
perf_event__check_size(union perf_event *event, unsigned int sample_size)
{
	/*
	 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
	 * up to PERF_SAMPLE_PERIOD.  After that overflow() must be used to
	 * check the format does not go past the end of the event.
	 */
	if (sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;

	return 0;
}

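/*
 * Parse a PERF_RECORD_SAMPLE payload into *data, consuming the fields
 * in the order the kernel lays them out, i.e. the order of the
 * PERF_SAMPLE_* bits.  Non-sample events carry at most the trailing id
 * sample, which is handled by perf_evsel__parse_id_sample() above.
 */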
int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
			     struct perf_sample *data)
{
	u64 type = evsel->attr.sample_type;
	bool swapped = evsel->needs_swap;
	const u64 *array;
	u16 max_size = event->header.size;
	const void *endp = (void *)event + max_size;
	u64 sz;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	memset(data, 0, sizeof(*data));
	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;
	data->period = evsel->attr.sample_period;
	data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	data->misc    = event->header.misc;
	data->id = -1ULL;
	data->data_src = PERF_MEM_DATA_SRC_NONE;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!evsel->attr.sample_id_all)
			return 0;
		return perf_evsel__parse_id_sample(evsel, event, data);
	}

	array = event->sample.array;

	if (perf_event__check_size(event, evsel->sample_size))
		return -EFAULT;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		data->ip = *array;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		data->pid = u.val32[0];
		data->tid = u.val32[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {

		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		data->cpu = u.val32[0];
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		u64 read_format = evsel->attr.read_format;

		OVERFLOW_CHECK_u64(array);
		if (read_format & PERF_FORMAT_GROUP)
			data->read.group.nr = *array;
		else
			data->read.one.value = *array;

		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_enabled = *array;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_running = *array;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			const u64 max_group_nr = UINT64_MAX /
					sizeof(struct sample_read_value);

			if (data->read.group.nr > max_group_nr)
				return -EFAULT;
			sz = data->read.group.nr *
			     sizeof(struct sample_read_value);
			OVERFLOW_CHECK(array, sz, max_size);
			data->read.group.values =
					(struct sample_read_value *)array;
			array = (void *)array + sz;
		} else {
			OVERFLOW_CHECK_u64(array);
			data->read.one.id = *array;
			array++;
		}
	}

	if (evsel__has_callchain(evsel)) {
		const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);

		OVERFLOW_CHECK_u64(array);
		data->callchain = (struct ip_callchain *)array++;
		if (data->callchain->nr > max_callchain_nr)
			return -EFAULT;
		sz = data->callchain->nr * sizeof(u64);
		OVERFLOW_CHECK(array, sz, max_size);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		OVERFLOW_CHECK_u64(array);
		u.val64 = *array;

		/*
		 * Undo swap of u64, then swap on individual u32s,
		 * get the size of the raw area and undo all of the
		 * swap. The pevent interface handles endianity by
		 * itself.
		 */
		if (swapped) {
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}
		data->raw_size = u.val32[0];

		/*
		 * The raw data is aligned on 64bits including the
		 * u32 size, so it's safe to use mem_bswap_64.
		 */
		if (swapped)
			mem_bswap_64((void *) array, data->raw_size);

		array = (void *)array + sizeof(u32);

		OVERFLOW_CHECK(array, data->raw_size, max_size);
		data->raw_data = (void *)array;
		array = (void *)array + data->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		const u64 max_branch_nr = UINT64_MAX /
					  sizeof(struct branch_entry);

		OVERFLOW_CHECK_u64(array);
		data->branch_stack = (struct branch_stack *)array++;

		if (data->branch_stack->nr > max_branch_nr)
			return -EFAULT;
		sz = data->branch_stack->nr * sizeof(struct branch_entry);
		OVERFLOW_CHECK(array, sz, max_size);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		OVERFLOW_CHECK_u64(array);
		data->user_regs.abi = *array;
		array++;

		if (data->user_regs.abi) {
			u64 mask = evsel->attr.sample_regs_user;

			sz = hweight_long(mask) * sizeof(u64);
			OVERFLOW_CHECK(array, sz, max_size);
			data->user_regs.mask = mask;
			data->user_regs.regs = (u64 *)array;
			array = (void *)array + sz;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		OVERFLOW_CHECK_u64(array);
		sz = *array++;

		data->user_stack.offset = ((char *)(array - 1)
					  - (char *) event);

		if (!sz) {
			data->user_stack.size = 0;
		} else {
			OVERFLOW_CHECK(array, sz, max_size);
			data->user_stack.data = (char *)array;
			array = (void *)array + sz;
			OVERFLOW_CHECK_u64(array);
			data->user_stack.size = *array++;
			if (WARN_ONCE(data->user_stack.size > sz,
				      "user stack dump failure\n"))
				return -EFAULT;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		OVERFLOW_CHECK_u64(array);
		data->weight = *array;
		array++;
	}

	if (type & PERF_SAMPLE_DATA_SRC) {
		OVERFLOW_CHECK_u64(array);
		data->data_src = *array;
		array++;
	}

	if (type & PERF_SAMPLE_TRANSACTION) {
		OVERFLOW_CHECK_u64(array);
		data->transaction = *array;
		array++;
	}

	data->intr_regs.abi = PERF_SAMPLE_REGS_ABI_NONE;
	if (type & PERF_SAMPLE_REGS_INTR) {
		OVERFLOW_CHECK_u64(array);
		data->intr_regs.abi = *array;
		array++;

		if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) {
			u64 mask = evsel->attr.sample_regs_intr;

			sz = hweight_long(mask) * sizeof(u64);
			OVERFLOW_CHECK(array, sz, max_size);
			data->intr_regs.mask = mask;
			data->intr_regs.regs = (u64 *)array;
			array = (void *)array + sz;
		}
	}

	data->phys_addr = 0;
	if (type & PERF_SAMPLE_PHYS_ADDR) {
		data->phys_addr = *array;
		array++;
	}

	return 0;
}

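/*
 * Fast path used when ordering events: extract just the timestamp,
 * skipping the few fixed-size fields that precede PERF_SAMPLE_TIME
 * instead of doing a full perf_evsel__parse_sample().
 */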
int perf_evsel__parse_sample_timestamp(struct perf_evsel *evsel,
				       union perf_event *event,
				       u64 *timestamp)
{
	u64 type = evsel->attr.sample_type;
	const u64 *array;

	if (!(type & PERF_SAMPLE_TIME))
		return -1;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		struct perf_sample data = {
			.time = -1ULL,
		};

		if (!evsel->attr.sample_id_all)
			return -1;
		if (perf_evsel__parse_id_sample(evsel, event, &data))
			return -1;

		*timestamp = data.time;
		return 0;
	}

	array = event->sample.array;

	if (perf_event__check_size(event, evsel->sample_size))
		return -EFAULT;

	if (type & PERF_SAMPLE_IDENTIFIER)
		array++;

	if (type & PERF_SAMPLE_IP)
		array++;

	if (type & PERF_SAMPLE_TID)
		array++;

	if (type & PERF_SAMPLE_TIME)
		*timestamp = *array;

	return 0;
}

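/*
 * Compute the number of bytes perf_event__synthesize_sample() below
 * will write for this sample; the two functions must stay in sync.
 */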
size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
				     u64 read_format)
{
	size_t sz, result = sizeof(struct sample_event);

	if (type & PERF_SAMPLE_IDENTIFIER)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_IP)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TIME)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ADDR)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_STREAM_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_CPU)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_PERIOD)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_READ) {
		result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			result += sizeof(u64);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		result += sizeof(u32);
		result += sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			result += sizeof(u64);
			sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		result += sizeof(u64);
		if (sz) {
			result += sz;
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_DATA_SRC)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TRANSACTION)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			result += sizeof(u64);
			sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_PHYS_ADDR)
		result += sizeof(u64);

	return result;
}

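/*
 * The inverse of perf_evsel__parse_sample(): write the sample out in
 * the kernel's layout.  The caller is expected to have sized the event
 * buffer with perf_event__sample_event_size().
 */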
int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  u64 read_format,
				  const struct perf_sample *sample)
{
	u64 *array;
	size_t sz;
	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		*array = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		u.val32[1] = 0;
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		if (read_format & PERF_FORMAT_GROUP)
			*array = sample->read.group.nr;
		else
			*array = sample->read.one.value;
		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			*array = sample->read.time_enabled;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			*array = sample->read.time_running;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			memcpy(array, sample->read.group.values, sz);
			array = (void *)array + sz;
		} else {
			*array = sample->read.one.id;
			array++;
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		memcpy(array, sample->callchain, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		u.val32[0] = sample->raw_size;
		*array = u.val64;
		array = (void *)array + sizeof(u32);

		memcpy(array, sample->raw_data, sample->raw_size);
		array = (void *)array + sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);
		memcpy(array, sample->branch_stack, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			*array++ = sample->user_regs.abi;
			sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
			memcpy(array, sample->user_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		*array++ = sz;
		if (sz) {
			memcpy(array, sample->user_stack.data, sz);
			array = (void *)array + sz;
			*array++ = sz;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		*array = sample->weight;
		array++;
	}

	if (type & PERF_SAMPLE_DATA_SRC) {
		*array = sample->data_src;
		array++;
	}

	if (type & PERF_SAMPLE_TRANSACTION) {
		*array = sample->transaction;
		array++;
	}

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			*array++ = sample->intr_regs.abi;
			sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
			memcpy(array, sample->intr_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	if (type & PERF_SAMPLE_PHYS_ADDR) {
		*array = sample->phys_addr;
		array++;
	}

	return 0;
}

struct tep_format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
{
	return tep_find_field(evsel->tp_format, name);
}

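/*
 * Return a pointer into the raw tracepoint data for the named field.
 * For dynamic arrays the stored u32 encodes the payload offset in its
 * low 16 bits, hence the extra dereference.
 */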
void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
			 const char *name)
{
	struct tep_format_field *field = perf_evsel__field(evsel, name);
	int offset;

	if (!field)
		return NULL;

	offset = field->offset;

	if (field->flags & TEP_FIELD_IS_DYNAMIC) {
		offset = *(int *)(sample->raw_data + field->offset);
		offset &= 0xffff;
	}

	return sample->raw_data + offset;
}

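/*
 * Read an integer field of 1, 2, 4 or 8 bytes from the raw data,
 * byte-swapping when the recorded endianness differs from the host's.
 * The 8-byte case goes through memcpy() since the field may not be
 * naturally aligned.
 */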
u64 format_field__intval(struct tep_format_field *field, struct perf_sample *sample,
			 bool needs_swap)
{
	u64 value;
	void *ptr = sample->raw_data + field->offset;

	switch (field->size) {
	case 1:
		return *(u8 *)ptr;
	case 2:
		value = *(u16 *)ptr;
		break;
	case 4:
		value = *(u32 *)ptr;
		break;
	case 8:
		memcpy(&value, ptr, sizeof(u64));
		break;
	default:
		return 0;
	}

	if (!needs_swap)
		return value;

	switch (field->size) {
	case 2:
		return bswap_16(value);
	case 4:
		return bswap_32(value);
	case 8:
		return bswap_64(value);
	default:
		return 0;
	}

	return 0;
}

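/*
 * Look up a tracepoint field by name and return its integer value, or 0
 * when the field does not exist.  A hypothetical consumer would do e.g.:
 *
 *	u64 pid = perf_evsel__intval(evsel, sample, "common_pid");
 */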
u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
		       const char *name)
{
	struct tep_format_field *field = perf_evsel__field(evsel, name);

	if (!field)
		return 0;

	return format_field__intval(field, sample, evsel->needs_swap);
}

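/*
 * Called when sys_perf_event_open() failed: try to degrade the request
 * instead of giving up.  Hardware cycles fall back to the software
 * cpu-clock, and when perf_event_paranoid forbids kernel profiling the
 * event is restricted to user space (a "u" modifier is appended to its
 * name).  Returns true if the open is worth retrying.
 */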
bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
			  char *msg, size_t msgsize)
{
	int paranoid;

	if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
	    evsel->attr.type   == PERF_TYPE_HARDWARE &&
	    evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
		/*
		 * If it's cycles then fall back to hrtimer based
		 * cpu-clock-tick sw counter, which is always available even if
		 * no PMU support.
		 *
		 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
		 * b0a873e).
		 */
		scnprintf(msg, msgsize, "%s",
"The cycles event is not supported, trying to fall back to cpu-clock-ticks");

		evsel->attr.type   = PERF_TYPE_SOFTWARE;
		evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;

		zfree(&evsel->name);
		return true;
	} else if (err == EACCES && !evsel->attr.exclude_kernel &&
		   (paranoid = perf_event_paranoid()) > 1) {
		const char *name = perf_evsel__name(evsel);
		char *new_name;
		const char *sep = ":";

		/* Is the separator already in the name? */
		if (strchr(name, '/') ||
		    strchr(name, ':'))
			sep = "";

		if (asprintf(&new_name, "%s%su", name, sep) < 0)
			return false;

		free(evsel->name);
		evsel->name = new_name;
		scnprintf(msg, msgsize,
"kernel.perf_event_paranoid=%d, trying to fall back to excluding kernel samples", paranoid);
		evsel->attr.exclude_kernel = 1;

		return true;
	}

	return false;
}

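/*
 * Scan /proc/<pid>/comm for a running process with the given name; used
 * below to detect an oprofile daemon hogging the PMU counters.
 */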
static bool find_process(const char *name)
{
	size_t len = strlen(name);
	DIR *dir;
	struct dirent *d;
	int ret = -1;

	dir = opendir(procfs__mountpoint());
	if (!dir)
		return false;

	/* Walk through the directory. */
	while (ret && (d = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		char *data;
		size_t size;

		if ((d->d_type != DT_DIR) ||
		     !strcmp(".", d->d_name) ||
		     !strcmp("..", d->d_name))
			continue;

		scnprintf(path, sizeof(path), "%s/%s/comm",
			  procfs__mountpoint(), d->d_name);

		if (filename__read_str(path, &data, &size))
			continue;

		ret = strncmp(name, data, len);
		free(data);
	}

	closedir(dir);
	return ret ? false : true;
}

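/*
 * Turn the errno from a failed sys_perf_event_open() into an
 * actionable, human readable message tailored to the event and target.
 */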
int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
			      int err, char *msg, size_t size)
{
	char sbuf[STRERR_BUFSIZE];
	int printed = 0;

	switch (err) {
	case EPERM:
	case EACCES:
		if (err == EPERM)
			printed = scnprintf(msg, size,
				"No permission to enable %s event.\n\n",
				perf_evsel__name(evsel));

		return scnprintf(msg + printed, size - printed,
		 "You may not have permission to collect %sstats.\n\n"
		 "Consider tweaking /proc/sys/kernel/perf_event_paranoid,\n"
		 "which controls use of the performance events system by\n"
		 "unprivileged users (without CAP_SYS_ADMIN).\n\n"
		 "The current value is %d:\n\n"
		 "  -1: Allow use of (almost) all events by all users\n"
		 "      Ignore mlock limit after perf_event_mlock_kb without CAP_IPC_LOCK\n"
		 ">= 0: Disallow ftrace function tracepoint by users without CAP_SYS_ADMIN\n"
		 "      Disallow raw tracepoint access by users without CAP_SYS_ADMIN\n"
		 ">= 1: Disallow CPU event access by users without CAP_SYS_ADMIN\n"
		 ">= 2: Disallow kernel profiling by users without CAP_SYS_ADMIN\n\n"
		 "To make this setting permanent, edit /etc/sysctl.conf too, e.g.:\n\n"
		 "	kernel.perf_event_paranoid = -1\n" ,
				 target->system_wide ? "system-wide " : "",
				 perf_event_paranoid());
	case ENOENT:
		return scnprintf(msg, size, "The %s event is not supported.",
				 perf_evsel__name(evsel));
	case EMFILE:
		return scnprintf(msg, size, "%s",
			 "Too many events are opened.\n"
			 "Probably the maximum number of open file descriptors has been reached.\n"
			 "Hint: Try again after reducing the number of events.\n"
			 "Hint: Try increasing the limit with 'ulimit -n <limit>'");
	case ENOMEM:
		if (evsel__has_callchain(evsel) &&
		    access("/proc/sys/kernel/perf_event_max_stack", F_OK) == 0)
			return scnprintf(msg, size,
					 "Not enough memory to setup event with callchain.\n"
					 "Hint: Try tweaking /proc/sys/kernel/perf_event_max_stack\n"
					 "Hint: Current value: %d", sysctl__max_stack());
		break;
	case ENODEV:
		if (target->cpu_list)
			return scnprintf(msg, size, "%s",
	 "No such device - did you specify an out-of-range profile CPU?");
		break;
	case EOPNOTSUPP:
		if (evsel->attr.sample_period != 0)
			return scnprintf(msg, size,
	"%s: PMU Hardware doesn't support sampling/overflow-interrupts. Try 'perf stat'",
					 perf_evsel__name(evsel));
		if (evsel->attr.precise_ip)
			return scnprintf(msg, size, "%s",
	"\'precise\' request may not be supported. Try removing 'p' modifier.");
#if defined(__i386__) || defined(__x86_64__)
		if (evsel->attr.type == PERF_TYPE_HARDWARE)
			return scnprintf(msg, size, "%s",
	"No hardware sampling interrupt available.\n");
#endif
		break;
	case EBUSY:
		if (find_process("oprofiled"))
			return scnprintf(msg, size,
	"The PMU counters are busy/taken by another profiler.\n"
	"We found oprofile daemon running, please stop it and try again.");
		break;
	case EINVAL:
		if (evsel->attr.write_backward && perf_missing_features.write_backward)
			return scnprintf(msg, size, "Reading from overwrite event is not supported by this kernel.");
		if (perf_missing_features.clockid)
			return scnprintf(msg, size, "clockid feature not supported.");
		if (perf_missing_features.clockid_wrong)
			return scnprintf(msg, size, "wrong clockid (%d).", clockid);
		break;
	default:
		break;
	}

	return scnprintf(msg, size,
	"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
	"/bin/dmesg | grep -i perf may provide additional information.\n",
			 err, str_error_r(err, sbuf, sizeof(sbuf)),
			 perf_evsel__name(evsel));
}

struct perf_env *perf_evsel__env(struct perf_evsel *evsel)
{
	if (evsel && evsel->evlist)
		return evsel->evlist->env;
	return NULL;
}
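/*
 * Record the kernel-assigned sample ids of every (cpu, thread) fd in
 * the evlist, so that samples can later be matched back to this evsel.
 */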

static int store_evsel_ids(struct perf_evsel *evsel, struct perf_evlist *evlist)
{
	int cpu, thread;

	for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
		for (thread = 0; thread < xyarray__max_y(evsel->fd);
		     thread++) {
			int fd = FD(evsel, cpu, thread);

			if (perf_evlist__id_add_fd(evlist, evsel,
						   cpu, thread, fd) < 0)
				return -1;
		}
	}

	return 0;
}

int perf_evsel__store_ids(struct perf_evsel *evsel, struct perf_evlist *evlist)
{
	struct cpu_map *cpus = evsel->cpus;
	struct thread_map *threads = evsel->threads;

	if (perf_evsel__alloc_id(evsel, cpus->nr, threads->nr))
		return -ENOMEM;

	return store_evsel_ids(evsel, evlist);
}