/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <byteswap.h>
#include <linux/bitops.h>
#include <api/fs/debugfs.h>
#include <traceevent/event-parse.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <sys/resource.h>
#include "asm/bug.h"
#include "evsel.h"
#include "evlist.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "perf_regs.h"
#include "debug.h"
#include "trace-event.h"
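
/*
 * Note on the struct below (comment added for clarity, not in the original
 * source): each flag records an attr feature that the running kernel
 * rejected.  When sys_perf_event_open() fails with EINVAL in
 * __perf_evsel__open(), the matching flag is set, the attr bit is cleared
 * and the open is retried (see the fallback_missing_features and
 * retry_sample_id labels below).
 */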

static struct {
	bool sample_id_all;
	bool exclude_guest;
	bool mmap2;
} perf_missing_features;

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))

int __perf_evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}
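
/*
 * Illustrative example (not part of the original source): for
 * sample_type == (PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME),
 * three bits of PERF_SAMPLE_MASK are set, so the fixed-size part of each
 * sample body is 3 * sizeof(u64) == 24 bytes.
 */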

/**
 * __perf_evsel__calc_id_pos - calculate id_pos.
 * @sample_type: sample type
 *
 * This function returns the position of the event id (PERF_SAMPLE_ID or
 * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct
 * sample_event.
 */
static int __perf_evsel__calc_id_pos(u64 sample_type)
{
	int idx = 0;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 0;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_IP)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TID)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TIME)
		idx += 1;

	if (sample_type & PERF_SAMPLE_ADDR)
		idx += 1;

	return idx;
}
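
/*
 * Worked example (illustrative, not from the original source): with
 * sample_type == (PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME |
 * PERF_SAMPLE_ID), the id is the fourth u64 of the sample body, so
 * id_pos == 3.
 */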

/**
 * __perf_evsel__calc_is_pos - calculate is_pos.
 * @sample_type: sample type
 *
 * This function returns the position (counting backwards) of the event id
 * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if
 * sample_id_all is used there is an id sample appended to non-sample events.
 */
static int __perf_evsel__calc_is_pos(u64 sample_type)
{
	int idx = 1;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 1;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_CPU)
		idx += 1;

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		idx += 1;

	return idx;
}
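
/*
 * Worked example (illustrative): with PERF_SAMPLE_ID and PERF_SAMPLE_CPU
 * set, but neither PERF_SAMPLE_IDENTIFIER nor PERF_SAMPLE_STREAM_ID, the
 * id sample appended to non-sample events ends with ... ID CPU, so the id
 * is the second u64 counting back from the end and is_pos == 2.
 */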

void perf_evsel__calc_id_pos(struct perf_evsel *evsel)
{
	evsel->id_pos = __perf_evsel__calc_id_pos(evsel->attr.sample_type);
	evsel->is_pos = __perf_evsel__calc_is_pos(evsel->attr.sample_type);
}

void hists__init(struct hists *hists)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
}

void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
				  enum perf_event_sample_format bit)
{
	if (!(evsel->attr.sample_type & bit)) {
		evsel->attr.sample_type |= bit;
		evsel->sample_size += sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}

void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
				    enum perf_event_sample_format bit)
{
	if (evsel->attr.sample_type & bit) {
		evsel->attr.sample_type &= ~bit;
		evsel->sample_size -= sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}

void perf_evsel__set_sample_id(struct perf_evsel *evsel,
			       bool can_sample_identifier)
{
	if (can_sample_identifier) {
		perf_evsel__reset_sample_bit(evsel, ID);
		perf_evsel__set_sample_bit(evsel, IDENTIFIER);
	} else {
		perf_evsel__set_sample_bit(evsel, ID);
	}
	evsel->attr.read_format |= PERF_FORMAT_ID;
}

void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx	   = idx;
	evsel->attr	   = *attr;
	evsel->leader	   = evsel;
	evsel->unit	   = "";
	evsel->scale	   = 1.0;
	INIT_LIST_HEAD(&evsel->node);
	hists__init(&evsel->hists);
	evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
	perf_evsel__calc_id_pos(evsel);
}

struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	return evsel;
}

struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL) {
		struct perf_event_attr attr = {
			.type	       = PERF_TYPE_TRACEPOINT,
			.sample_type   = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					  PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		};

		if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
			goto out_free;

		evsel->tp_format = trace_event__tp_format(sys, name);
		if (evsel->tp_format == NULL)
			goto out_free;

		event_attr_init(&attr);
		attr.config = evsel->tp_format->id;
		attr.sample_period = 1;
		perf_evsel__init(evsel, &attr, idx);
	}

	return evsel;

out_free:
	zfree(&evsel->name);
	free(evsel);
	return NULL;
}
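
/*
 * Usage sketch (illustrative; assumes the perf_evsel__newtp(sys, name)
 * idx == 0 convenience wrapper declared in evsel.h):
 *
 *	struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch");
 *
 *	if (evsel == NULL)
 *		return -ENOMEM;
 */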

const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};

static const char *__perf_evsel__hw_name(u64 config)
{
	if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
		return perf_evsel__hw_names[config];

	return "unknown-hardware";
}

static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
{
	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->attr;
	bool exclude_guest_default = false;

#define MOD_PRINT(context, mod)	do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod);	\
		} } while(0)

	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	if (colon)
		bf[colon - 1] = ':';
	return r;
}
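
/*
 * Illustrative example (not from the original source): for an event with
 * exclude_kernel and exclude_hv set and the guest/host bits left at their
 * defaults, the code above appends ":u", giving names like "cycles:u";
 * a non-zero precise_ip additionally appends that many 'p' characters.
 */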

static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"cpu-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
	"dummy",
};

static const char *__perf_evsel__sw_name(u64 config)
{
	if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
		return perf_evsel__sw_names[config];
	return "unknown-software";
}

static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
	int r;

	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

	if (type & HW_BREAKPOINT_R)
		r += scnprintf(bf + r, size - r, "r");

	if (type & HW_BREAKPOINT_W)
		r += scnprintf(bf + r, size - r, "w");

	if (type & HW_BREAKPOINT_X)
		r += scnprintf(bf + r, size - r, "x");

	return r;
}

static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	struct perf_event_attr *attr = &evsel->attr;
	int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				[PERF_EVSEL__MAX_ALIASES] = {
 { "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
 { "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
 { "LLC",	"L2",							},
 { "dTLB",	"d-tlb",	"Data-TLB",				},
 { "iTLB",	"i-tlb",	"Instruction-TLB",			},
 { "branch",	"branches",	"bpu",		"btb",		"bpc",	},
 { "node",								},
};

const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
				   [PERF_EVSEL__MAX_ALIASES] = {
 { "load",	"loads",	"read",					},
 { "store",	"stores",	"write",				},
 { "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
};

const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
				       [PERF_EVSEL__MAX_ALIASES] = {
 { "refs",	"Reference",	"ops",		"access",		},
 { "misses",	"miss",							},
};

#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
 [C(L1D)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(L1I)]	= (CACHE_READ | CACHE_PREFETCH),
 [C(LL)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(DTLB)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(ITLB)]	= (CACHE_READ),
 [C(BPU)]	= (CACHE_READ),
 [C(NODE)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};

bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
{
	if (perf_evsel__hw_cache_stat[type] & COP(op))
		return true;	/* valid */
	else
		return false;	/* invalid */
}

int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size)
{
	if (result) {
		return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
				 perf_evsel__hw_cache_op[op][0],
				 perf_evsel__hw_cache_result[result][0]);
	}

	return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
			 perf_evsel__hw_cache_op[op][1]);
}
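
/*
 * Worked example (illustrative): type == PERF_COUNT_HW_CACHE_L1D,
 * op == PERF_COUNT_HW_CACHE_OP_READ, result == PERF_COUNT_HW_CACHE_RESULT_MISS
 * yields "L1-dcache-load-misses"; with result == 0 (accesses) the second
 * form is used and the name becomes "L1-dcache-loads".
 */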

static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
	u8 op, result, type = (config >>  0) & 0xff;
	const char *err = "unknown-ext-hardware-cache-type";

	if (type >= PERF_COUNT_HW_CACHE_MAX)
		goto out_err;

	op = (config >>  8) & 0xff;
	err = "unknown-ext-hardware-cache-op";
	if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
		goto out_err;

	result = (config >> 16) & 0xff;
	err = "unknown-ext-hardware-cache-result";
	if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		goto out_err;

	err = "invalid-cache";
	if (!perf_evsel__is_cache_op_valid(type, op))
		goto out_err;

	return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
	return scnprintf(bf, size, "%s", err);
}

static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

const char *perf_evsel__name(struct perf_evsel *evsel)
{
	char bf[128];

	if (evsel->name)
		return evsel->name;

	switch (evsel->attr.type) {
	case PERF_TYPE_RAW:
		perf_evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		perf_evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		perf_evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	case PERF_TYPE_BREAKPOINT:
		perf_evsel__bp_name(evsel, bf, sizeof(bf));
		break;

	default:
		scnprintf(bf, sizeof(bf), "unknown attr type: %d",
			  evsel->attr.type);
		break;
	}

	evsel->name = strdup(bf);

	return evsel->name ?: "unknown";
}

const char *perf_evsel__group_name(struct perf_evsel *evsel)
{
	return evsel->group_name ?: "anon group";
}

int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
{
	int ret;
	struct perf_evsel *pos;
	const char *group_name = perf_evsel__group_name(evsel);

	ret = scnprintf(buf, size, "%s", group_name);

	ret += scnprintf(buf + ret, size - ret, " { %s",
			 perf_evsel__name(evsel));

	for_each_group_member(pos, evsel)
		ret += scnprintf(buf + ret, size - ret, ", %s",
				 perf_evsel__name(pos));

	ret += scnprintf(buf + ret, size - ret, " }");

	return ret;
}

/*
 * The enable_on_exec/disabled value strategy:
 *
 *  1) For any type of traced program:
 *    - all independent events and group leaders are disabled
 *    - all group members are enabled
 *
 *     Group members are ruled by group leaders. They need to
 *     be enabled, because the group scheduling relies on that.
 *
 *  2) For traced programs executed by perf:
 *     - all independent events and group leaders have
 *       enable_on_exec set
 *     - we don't specifically enable or disable any event during
 *       the record command
 *
 *     Independent events and group leaders are initially disabled
 *     and get enabled by exec. Group members are ruled by group
 *     leaders as stated in 1).
 *
 *  3) For traced programs attached by perf (pid/tid):
 *     - we specifically enable or disable all events during
 *       the record command
 *
 *     When attaching events to an already running traced process we
 *     enable/disable events specifically, as there's no
 *     initial traced exec call.
 */
void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
{
	struct perf_evsel *leader = evsel->leader;
	struct perf_event_attr *attr = &evsel->attr;
	int track = !evsel->idx; /* only the first counter needs these */
	bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;

	attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;

	perf_evsel__set_sample_bit(evsel, IP);
	perf_evsel__set_sample_bit(evsel, TID);

	if (evsel->sample_read) {
		perf_evsel__set_sample_bit(evsel, READ);

		/*
		 * We need ID even in the case of a single event, because
		 * PERF_SAMPLE_READ processes ID-specific data.
		 */
		perf_evsel__set_sample_id(evsel, false);

		/*
		 * Apply the group read format only if we belong to a group
		 * with more than one member.
		 */
		if (leader->nr_members > 1) {
			attr->read_format |= PERF_FORMAT_GROUP;
			attr->inherit = 0;
		}
	}

	/*
	 * We default some events to a sample period of 1. But keep it
	 * a weak assumption, overridable by the user.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			perf_evsel__set_sample_bit(evsel, PERIOD);
			attr->freq		= 1;
			attr->sample_freq	= opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	/*
	 * Disable sampling for all group members other
	 * than leader in case leader 'leads' the sampling.
	 */
	if ((leader != evsel) && leader->sample_read) {
		attr->sample_freq   = 0;
		attr->sample_period = 0;
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat)
		attr->inherit_stat = 1;

	if (opts->sample_address) {
		perf_evsel__set_sample_bit(evsel, ADDR);
		attr->mmap_data = track;
	}

	if (opts->call_graph) {
		perf_evsel__set_sample_bit(evsel, CALLCHAIN);

		if (opts->call_graph == CALLCHAIN_DWARF) {
			perf_evsel__set_sample_bit(evsel, REGS_USER);
			perf_evsel__set_sample_bit(evsel, STACK_USER);
			attr->sample_regs_user = PERF_REGS_MASK;
			attr->sample_stack_user = opts->stack_dump_size;
			attr->exclude_callchain_user = 1;
		}
	}

	if (target__has_cpu(&opts->target))
		perf_evsel__set_sample_bit(evsel, CPU);

	if (opts->period)
		perf_evsel__set_sample_bit(evsel, PERIOD);

	if (!perf_missing_features.sample_id_all &&
	    (opts->sample_time || !opts->no_inherit ||
	     target__has_cpu(&opts->target) || per_cpu))
		perf_evsel__set_sample_bit(evsel, TIME);

	if (opts->raw_samples) {
		perf_evsel__set_sample_bit(evsel, TIME);
		perf_evsel__set_sample_bit(evsel, RAW);
		perf_evsel__set_sample_bit(evsel, CPU);
	}

	if (opts->sample_address)
		perf_evsel__set_sample_bit(evsel, DATA_SRC);

	if (opts->no_delay) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}
	if (opts->branch_stack) {
		perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type = opts->branch_stack;
	}

	if (opts->sample_weight)
		perf_evsel__set_sample_bit(evsel, WEIGHT);

	attr->mmap  = track;
	attr->comm  = track;

	if (opts->sample_transaction)
		perf_evsel__set_sample_bit(evsel, TRANSACTION);

	/*
	 * XXX see the function comment above
	 *
	 * Disabling only independent events or group leaders,
	 * keeping group members enabled.
	 */
	if (perf_evsel__is_group_leader(evsel))
		attr->disabled = 1;

	/*
	 * Setting enable_on_exec for independent events and
	 * group leaders for traced programs executed by perf.
	 */
	if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel))
		attr->enable_on_exec = 1;
}

int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;
	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}

static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthreads,
			  int ioc,  void *arg)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			int fd = FD(evsel, cpu, thread),
			    err = ioctl(fd, ioc, arg);

			if (err)
				return err;
		}
	}

	return 0;
}

int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
			   const char *filter)
{
	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
				     PERF_EVENT_IOC_SET_FILTER,
				     (void *)filter);
}

int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
				     PERF_EVENT_IOC_ENABLE,
				     0);
}

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}

void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus)
{
	memset(evsel->counts, 0, (sizeof(*evsel->counts) +
				 (ncpus * sizeof(struct perf_counts_values))));
}

int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}

void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	zfree(&evsel->id);
}

void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}

void perf_evsel__free_counts(struct perf_evsel *evsel)
{
	zfree(&evsel->counts);
}

void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	perf_evsel__free_fd(evsel);
	perf_evsel__free_id(evsel);
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	close_cgroup(evsel->cgrp);
	zfree(&evsel->group_name);
	if (evsel->tp_format)
		pevent_free_format(evsel->tp_format);
	zfree(&evsel->name);
	free(evsel);
}

static inline void compute_deltas(struct perf_evsel *evsel,
				  int cpu,
				  struct perf_counts_values *count)
{
	struct perf_counts_values tmp;

	if (!evsel->prev_raw_counts)
		return;

	if (cpu == -1) {
		tmp = evsel->prev_raw_counts->aggr;
		evsel->prev_raw_counts->aggr = *count;
	} else {
		tmp = evsel->prev_raw_counts->cpu[cpu];
		evsel->prev_raw_counts->cpu[cpu] = *count;
	}

	count->val = count->val - tmp.val;
	count->ena = count->ena - tmp.ena;
	count->run = count->run - tmp.run;
}
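
/*
 * Illustrative example (not in the original source): if the previous raw
 * reading for a cpu was { .val = 1000 } and the new reading is
 * { .val = 1500 }, the caller sees a delta of 500 and prev_raw_counts is
 * updated to 1500 for the next interval.
 */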

int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	compute_deltas(evsel, cpu, &count);

	if (scale) {
		if (count.run == 0)
			count.val = 0;
		else if (count.run < count.ena)
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;
	return 0;
}
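
/*
 * Scaling note with a worked example (illustrative): if the event was
 * multiplexed and only ran for half of the enabled time (run == ena / 2),
 * the raw value is scaled by ena / run, i.e. doubled; if it never ran
 * (run == 0) the value is reported as 0.
 */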

int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	compute_deltas(evsel, -1, aggr);

	evsel->counts->scaled = 0;
	if (scale) {
		if (aggr->run == 0) {
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			evsel->counts->scaled = 1;
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}

static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
{
	struct perf_evsel *leader = evsel->leader;
	int fd;

	if (perf_evsel__is_group_leader(evsel))
		return -1;

	/*
	 * The leader must already have been processed/opened;
	 * if not, it's a bug.
	 */
	BUG_ON(!leader->fd);

	fd = FD(leader, cpu, thread);
	BUG_ON(fd == -1);

	return fd;
}
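
/*
 * Illustrative example (not part of the original source): for a group such
 * as "{cycles,instructions}", the leader (cycles) is opened with
 * group_fd == -1, while instructions is opened with group_fd set to the
 * leader's fd for the same cpu/thread, tying the counters together.
 */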

#define __PRINT_ATTR(fmt, cast, field)  \
	fprintf(fp, "  %-19s "fmt"\n", #field, cast attr->field)

#define PRINT_ATTR_U32(field)  __PRINT_ATTR("%u" , , field)
#define PRINT_ATTR_X32(field)  __PRINT_ATTR("%#x", , field)
#define PRINT_ATTR_U64(field)  __PRINT_ATTR("%" PRIu64, (uint64_t), field)
#define PRINT_ATTR_X64(field)  __PRINT_ATTR("%#"PRIx64, (uint64_t), field)

#define PRINT_ATTR2N(name1, field1, name2, field2)	\
	fprintf(fp, "  %-19s %u    %-19s %u\n",		\
	name1, attr->field1, name2, attr->field2)

#define PRINT_ATTR2(field1, field2) \
	PRINT_ATTR2N(#field1, field1, #field2, field2)

static size_t perf_event_attr__fprintf(struct perf_event_attr *attr, FILE *fp)
{
	size_t ret = 0;

	ret += fprintf(fp, "%.60s\n", graph_dotted_line);
	ret += fprintf(fp, "perf_event_attr:\n");

	ret += PRINT_ATTR_U32(type);
	ret += PRINT_ATTR_U32(size);
	ret += PRINT_ATTR_X64(config);
	ret += PRINT_ATTR_U64(sample_period);
	ret += PRINT_ATTR_U64(sample_freq);
	ret += PRINT_ATTR_X64(sample_type);
	ret += PRINT_ATTR_X64(read_format);

	ret += PRINT_ATTR2(disabled, inherit);
	ret += PRINT_ATTR2(pinned, exclusive);
	ret += PRINT_ATTR2(exclude_user, exclude_kernel);
	ret += PRINT_ATTR2(exclude_hv, exclude_idle);
	ret += PRINT_ATTR2(mmap, comm);
	ret += PRINT_ATTR2(freq, inherit_stat);
	ret += PRINT_ATTR2(enable_on_exec, task);
	ret += PRINT_ATTR2(watermark, precise_ip);
	ret += PRINT_ATTR2(mmap_data, sample_id_all);
	ret += PRINT_ATTR2(exclude_host, exclude_guest);
	ret += PRINT_ATTR2N("excl.callchain_kern", exclude_callchain_kernel,
			    "excl.callchain_user", exclude_callchain_user);
	ret += PRINT_ATTR_U32(mmap2);

	ret += PRINT_ATTR_U32(wakeup_events);
	ret += PRINT_ATTR_U32(wakeup_watermark);
	ret += PRINT_ATTR_X32(bp_type);
	ret += PRINT_ATTR_X64(bp_addr);
	ret += PRINT_ATTR_X64(config1);
	ret += PRINT_ATTR_U64(bp_len);
	ret += PRINT_ATTR_X64(config2);
	ret += PRINT_ATTR_X64(branch_sample_type);
	ret += PRINT_ATTR_X64(sample_regs_user);
	ret += PRINT_ATTR_U32(sample_stack_user);

	ret += fprintf(fp, "%.60s\n", graph_dotted_line);

	return ret;
}

static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads)
{
	int cpu, thread;
	unsigned long flags = 0;
	int pid = -1, err;
	enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -ENOMEM;

	if (evsel->cgrp) {
		flags = PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

fallback_missing_features:
	if (perf_missing_features.mmap2)
		evsel->attr.mmap2 = 0;
	if (perf_missing_features.exclude_guest)
		evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
retry_sample_id:
	if (perf_missing_features.sample_id_all)
		evsel->attr.sample_id_all = 0;

	if (verbose >= 2)
		perf_event_attr__fprintf(&evsel->attr, stderr);

	for (cpu = 0; cpu < cpus->nr; cpu++) {

		for (thread = 0; thread < threads->nr; thread++) {
			int group_fd;

			if (!evsel->cgrp)
				pid = threads->map[thread];

			group_fd = get_group_fd(evsel, cpu, thread);
retry_open:
			pr_debug2("perf_event_open: pid %d  cpu %d  group_fd %d  flags %#lx\n",
				  pid, cpus->map[cpu], group_fd, flags);

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0) {
				err = -errno;
				pr_debug2("perf_event_open failed, error %d\n",
					  err);
				goto try_fallback;
			}
			set_rlimit = NO_CHANGE;
		}
	}

	return 0;

try_fallback:
	/*
	 * perf stat needs between 5 and 22 fds per CPU. When we run out
	 * of them, try to increase the limits.
	 */
	if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
		struct rlimit l;
		int old_errno = errno;

		if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
			if (set_rlimit == NO_CHANGE)
				l.rlim_cur = l.rlim_max;
			else {
				l.rlim_cur = l.rlim_max + 1000;
				l.rlim_max = l.rlim_cur;
			}
			if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
				set_rlimit++;
				errno = old_errno;
				goto retry_open;
			}
		}
		errno = old_errno;
	}

	if (err != -EINVAL || cpu > 0 || thread > 0)
		goto out_close;

	if (!perf_missing_features.mmap2 && evsel->attr.mmap2) {
		perf_missing_features.mmap2 = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.exclude_guest &&
		   (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
		perf_missing_features.exclude_guest = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.sample_id_all) {
		perf_missing_features.sample_id_all = true;
		goto retry_sample_id;
	}

out_close:
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = threads->nr;
	} while (--cpu >= 0);
	return err;
}

void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel, ncpus, nthreads);
	perf_evsel__free_fd(evsel);
	evsel->fd = NULL;
}

static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr	= 1,
	.cpus	= { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};

int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads);
}

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
}

static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
				       const union perf_event *event,
				       struct perf_sample *sample)
{
	u64 type = evsel->attr.sample_type;
	const u64 *array = event->sample.array;
	bool swapped = evsel->needs_swap;
	union u64_swap u;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		sample->cpu = u.val32[0];
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		sample->pid = u.val32[0];
		sample->tid = u.val32[1];
		array--;
	}

	return 0;
}

static inline bool overflow(const void *endp, u16 max_size, const void *offset,
			    u64 size)
{
	return size > max_size || offset + size > endp;
}

#define OVERFLOW_CHECK(offset, size, max_size)				\
	do {								\
		if (overflow(endp, (max_size), (offset), (size)))	\
			return -EFAULT;					\
	} while (0)

#define OVERFLOW_CHECK_u64(offset) \
	OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))
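
/*
 * Descriptive note (added for clarity): every variable-sized chunk parsed
 * below (callchain, raw data, branch stack, user regs/stack, ...) is
 * checked against the event's header.size before it is dereferenced, so a
 * truncated or malformed sample makes the parser return -EFAULT instead of
 * reading past the end of the record.
 */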

int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
			     struct perf_sample *data)
{
	u64 type = evsel->attr.sample_type;
	bool swapped = evsel->needs_swap;
	const u64 *array;
	u16 max_size = event->header.size;
	const void *endp = (void *)event + max_size;
	u64 sz;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	memset(data, 0, sizeof(*data));
	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;
	data->period = 1;
	data->weight = 0;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!evsel->attr.sample_id_all)
			return 0;
		return perf_evsel__parse_id_sample(evsel, event, data);
	}

	array = event->sample.array;

	/*
	 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
	 * up to PERF_SAMPLE_PERIOD.  After that overflow() must be used to
	 * check the format does not go past the end of the event.
	 */
	if (evsel->sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;

	data->id = -1ULL;
	if (type & PERF_SAMPLE_IDENTIFIER) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		data->ip = *array;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		data->pid = u.val32[0];
		data->tid = u.val32[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	data->addr = 0;
	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {

		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		data->cpu = u.val32[0];
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		u64 read_format = evsel->attr.read_format;

		OVERFLOW_CHECK_u64(array);
		if (read_format & PERF_FORMAT_GROUP)
			data->read.group.nr = *array;
		else
			data->read.one.value = *array;

		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_enabled = *array;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
1324
			OVERFLOW_CHECK_u64(array);
1325 1326 1327 1328 1329 1330
			data->read.time_running = *array;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			const u64 max_group_nr = UINT64_MAX /
					sizeof(struct sample_read_value);

			if (data->read.group.nr > max_group_nr)
				return -EFAULT;
			sz = data->read.group.nr *
			     sizeof(struct sample_read_value);
			OVERFLOW_CHECK(array, sz, max_size);
			data->read.group.values =
					(struct sample_read_value *)array;
			array = (void *)array + sz;
		} else {
			OVERFLOW_CHECK_u64(array);
			data->read.one.id = *array;
			array++;
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);

		OVERFLOW_CHECK_u64(array);
		data->callchain = (struct ip_callchain *)array++;
		if (data->callchain->nr > max_callchain_nr)
			return -EFAULT;
		sz = data->callchain->nr * sizeof(u64);
		OVERFLOW_CHECK(array, sz, max_size);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		OVERFLOW_CHECK_u64(array);
		u.val64 = *array;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}
		data->raw_size = u.val32[0];
		array = (void *)array + sizeof(u32);

		OVERFLOW_CHECK(array, data->raw_size, max_size);
		data->raw_data = (void *)array;
		array = (void *)array + data->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		const u64 max_branch_nr = UINT64_MAX /
					  sizeof(struct branch_entry);

		OVERFLOW_CHECK_u64(array);
		data->branch_stack = (struct branch_stack *)array++;

		if (data->branch_stack->nr > max_branch_nr)
			return -EFAULT;
		sz = data->branch_stack->nr * sizeof(struct branch_entry);
		OVERFLOW_CHECK(array, sz, max_size);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		OVERFLOW_CHECK_u64(array);
		data->user_regs.abi = *array;
		array++;

		if (data->user_regs.abi) {
			u64 regs_user = evsel->attr.sample_regs_user;

			sz = hweight_long(regs_user) * sizeof(u64);
			OVERFLOW_CHECK(array, sz, max_size);
			data->user_regs.regs = (u64 *)array;
			array = (void *)array + sz;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		OVERFLOW_CHECK_u64(array);
		sz = *array++;

		data->user_stack.offset = ((char *)(array - 1)
					  - (char *) event);

		if (!sz) {
			data->user_stack.size = 0;
		} else {
			OVERFLOW_CHECK(array, sz, max_size);
			data->user_stack.data = (char *)array;
			array = (void *)array + sz;
			OVERFLOW_CHECK_u64(array);
			data->user_stack.size = *array++;
			if (WARN_ONCE(data->user_stack.size > sz,
				      "user stack dump failure\n"))
				return -EFAULT;
		}
	}

	data->weight = 0;
	if (type & PERF_SAMPLE_WEIGHT) {
		OVERFLOW_CHECK_u64(array);
		data->weight = *array;
		array++;
	}

	data->data_src = PERF_MEM_DATA_SRC_NONE;
	if (type & PERF_SAMPLE_DATA_SRC) {
		OVERFLOW_CHECK_u64(array);
		data->data_src = *array;
		array++;
	}

	data->transaction = 0;
	if (type & PERF_SAMPLE_TRANSACTION) {
		OVERFLOW_CHECK_u64(array);
		data->transaction = *array;
		array++;
	}

	return 0;
}

size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
				     u64 sample_regs_user, u64 read_format)
{
	size_t sz, result = sizeof(struct sample_event);

	if (type & PERF_SAMPLE_IDENTIFIER)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_IP)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TIME)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ADDR)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_STREAM_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_CPU)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_PERIOD)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_READ) {
		result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			result += sizeof(u64);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		result += sizeof(u32);
		result += sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			result += sizeof(u64);
			sz = hweight_long(sample_regs_user) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		result += sizeof(u64);
		if (sz) {
			result += sz;
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_DATA_SRC)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TRANSACTION)
		result += sizeof(u64);

	return result;
}

int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  u64 sample_regs_user, u64 read_format,
				  const struct perf_sample *sample,
				  bool swapped)
{
	u64 *array;
	size_t sz;
	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		*array = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}

		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		if (read_format & PERF_FORMAT_GROUP)
			*array = sample->read.group.nr;
		else
			*array = sample->read.one.value;
		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			*array = sample->read.time_enabled;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			*array = sample->read.time_running;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			memcpy(array, sample->read.group.values, sz);
			array = (void *)array + sz;
		} else {
			*array = sample->read.one.id;
			array++;
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		memcpy(array, sample->callchain, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		u.val32[0] = sample->raw_size;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array = (void *)array + sizeof(u32);

		memcpy(array, sample->raw_data, sample->raw_size);
		array = (void *)array + sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);
		memcpy(array, sample->branch_stack, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			*array++ = sample->user_regs.abi;
			sz = hweight_long(sample_regs_user) * sizeof(u64);
			memcpy(array, sample->user_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		*array++ = sz;
		if (sz) {
			memcpy(array, sample->user_stack.data, sz);
			array = (void *)array + sz;
			*array++ = sz;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		*array = sample->weight;
		array++;
	}

	if (type & PERF_SAMPLE_DATA_SRC) {
		*array = sample->data_src;
		array++;
	}

	if (type & PERF_SAMPLE_TRANSACTION) {
		*array = sample->transaction;
		array++;
	}

	return 0;
}

struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
{
	return pevent_find_field(evsel->tp_format, name);
}

void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
			 const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	int offset;

	if (!field)
		return NULL;

	offset = field->offset;

	if (field->flags & FIELD_IS_DYNAMIC) {
		offset = *(int *)(sample->raw_data + field->offset);
		offset &= 0xffff;
	}

	return sample->raw_data + offset;
}

u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
		       const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	void *ptr;
	u64 value;

	if (!field)
		return 0;

	ptr = sample->raw_data + field->offset;

	switch (field->size) {
	case 1:
		return *(u8 *)ptr;
	case 2:
		value = *(u16 *)ptr;
		break;
	case 4:
		value = *(u32 *)ptr;
		break;
	case 8:
		value = *(u64 *)ptr;
		break;
	default:
		return 0;
	}

	if (!evsel->needs_swap)
		return value;

	switch (field->size) {
	case 2:
		return bswap_16(value);
	case 4:
		return bswap_32(value);
	case 8:
		return bswap_64(value);
	default:
		return 0;
	}

	return 0;
}
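
/*
 * Usage sketch (illustrative; the field name depends on the tracepoint's
 * format file), e.g. for a sched:sched_switch evsel inside a sample handler:
 *
 *	u64 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid");
 */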

static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
{
	va_list args;
	int ret = 0;

	if (!*first) {
		ret += fprintf(fp, ",");
	} else {
		ret += fprintf(fp, ":");
		*first = false;
	}

	va_start(args, fmt);
	ret += vfprintf(fp, fmt, args);
	va_end(args);
	return ret;
}

static int __if_fprintf(FILE *fp, bool *first, const char *field, u64 value)
{
	if (value == 0)
		return 0;

	return comma_fprintf(fp, first, " %s: %" PRIu64, field, value);
}

#define if_print(field) printed += __if_fprintf(fp, &first, #field, evsel->attr.field)

struct bit_names {
	int bit;
	const char *name;
};

static int bits__fprintf(FILE *fp, const char *field, u64 value,
			 struct bit_names *bits, bool *first)
{
	int i = 0, printed = comma_fprintf(fp, first, " %s: ", field);
	bool first_bit = true;

	do {
		if (value & bits[i].bit) {
			printed += fprintf(fp, "%s%s", first_bit ? "" : "|", bits[i].name);
			first_bit = false;
		}
	} while (bits[++i].name != NULL);

	return printed;
}

static int sample_type__fprintf(FILE *fp, bool *first, u64 value)
{
#define bit_name(n) { PERF_SAMPLE_##n, #n }
	struct bit_names bits[] = {
		bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
		bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
		bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
		bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
		bit_name(IDENTIFIER),
		{ .name = NULL, }
	};
#undef bit_name
	return bits__fprintf(fp, "sample_type", value, bits, first);
}

static int read_format__fprintf(FILE *fp, bool *first, u64 value)
{
#define bit_name(n) { PERF_FORMAT_##n, #n }
	struct bit_names bits[] = {
		bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
		bit_name(ID), bit_name(GROUP),
		{ .name = NULL, }
	};
#undef bit_name
	return bits__fprintf(fp, "read_format", value, bits, first);
}

int perf_evsel__fprintf(struct perf_evsel *evsel,
			struct perf_attr_details *details, FILE *fp)
{
	bool first = true;
	int printed = 0;

	if (details->event_group) {
		struct perf_evsel *pos;

		if (!perf_evsel__is_group_leader(evsel))
			return 0;

		if (evsel->nr_members > 1)
			printed += fprintf(fp, "%s{", evsel->group_name ?: "");

		printed += fprintf(fp, "%s", perf_evsel__name(evsel));
		for_each_group_member(pos, evsel)
			printed += fprintf(fp, ",%s", perf_evsel__name(pos));

		if (evsel->nr_members > 1)
			printed += fprintf(fp, "}");
		goto out;
	}

	printed += fprintf(fp, "%s", perf_evsel__name(evsel));

	if (details->verbose || details->freq) {
		printed += comma_fprintf(fp, &first, " sample_freq=%" PRIu64,
					 (u64)evsel->attr.sample_freq);
	}

	if (details->verbose) {
		if_print(type);
		if_print(config);
		if_print(config1);
		if_print(config2);
		if_print(size);
		printed += sample_type__fprintf(fp, &first, evsel->attr.sample_type);
		if (evsel->attr.read_format)
			printed += read_format__fprintf(fp, &first, evsel->attr.read_format);
		if_print(disabled);
		if_print(inherit);
		if_print(pinned);
		if_print(exclusive);
		if_print(exclude_user);
		if_print(exclude_kernel);
		if_print(exclude_hv);
		if_print(exclude_idle);
		if_print(mmap);
		if_print(mmap2);
		if_print(comm);
		if_print(freq);
		if_print(inherit_stat);
		if_print(enable_on_exec);
		if_print(task);
		if_print(watermark);
		if_print(precise_ip);
		if_print(mmap_data);
		if_print(sample_id_all);
		if_print(exclude_host);
		if_print(exclude_guest);
		if_print(__reserved_1);
		if_print(wakeup_events);
		if_print(bp_type);
		if_print(branch_sample_type);
	}
out:
	fputc('\n', fp);
	return ++printed;
}

bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
			  char *msg, size_t msgsize)
{
	if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
	    evsel->attr.type   == PERF_TYPE_HARDWARE &&
	    evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
		/*
		 * If it's cycles then fall back to hrtimer based
		 * cpu-clock-tick sw counter, which is always available even if
		 * no PMU support.
		 *
		 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
		 * b0a873e).
		 */
		scnprintf(msg, msgsize, "%s",
"The cycles event is not supported, trying to fall back to cpu-clock-ticks");

		evsel->attr.type   = PERF_TYPE_SOFTWARE;
		evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;

		zfree(&evsel->name);
		return true;
	}

	return false;
}

int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
			      int err, char *msg, size_t size)
{
	switch (err) {
	case EPERM:
	case EACCES:
		return scnprintf(msg, size,
		 "You may not have permission to collect %sstats.\n"
		 "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
		 " -1 - Not paranoid at all\n"
		 "  0 - Disallow raw tracepoint access for unpriv\n"
		 "  1 - Disallow cpu events for unpriv\n"
		 "  2 - Disallow kernel profiling for unpriv",
				 target->system_wide ? "system-wide " : "");
	case ENOENT:
		return scnprintf(msg, size, "The %s event is not supported.",
				 perf_evsel__name(evsel));
	case EMFILE:
		return scnprintf(msg, size, "%s",
			 "Too many events are opened.\n"
			 "Try again after reducing the number of events.");
	case ENODEV:
		if (target->cpu_list)
			return scnprintf(msg, size, "%s",
	 "No such device - did you specify an out-of-range profile CPU?\n");
		break;
	case EOPNOTSUPP:
		if (evsel->attr.precise_ip)
			return scnprintf(msg, size, "%s",
	"\'precise\' request may not be supported. Try removing 'p' modifier.");
#if defined(__i386__) || defined(__x86_64__)
		if (evsel->attr.type == PERF_TYPE_HARDWARE)
			return scnprintf(msg, size, "%s",
	"No hardware sampling interrupt available.\n"
	"No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.");
#endif
		break;
	default:
		break;
	}

	return scnprintf(msg, size,
	"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).  \n"
	"/bin/dmesg may provide additional information.\n"
	"No CONFIG_PERF_EVENTS=y kernel support configured?\n",
			 err, strerror(err), perf_evsel__name(evsel));
}