/*
 * builtin-test.c
 *
 * Builtin regression testing command: ever growing number of sanity tests
 */
#include "builtin.h"

#include "util/cache.h"
#include "util/debug.h"
#include "util/debugfs.h"
#include "util/evlist.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/symbol.h"
#include "util/thread_map.h"
#include "util/pmu.h"
#include "../../include/linux/hw_breakpoint.h"

#include <sys/mman.h>

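/*
 * Symbol filter used when loading the vmlinux file: it just marks each
 * symbol as visited, via the per-symbol private area (symbol_conf.priv_size
 * is set to sizeof(int) in cmd_test).
 */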
static int vmlinux_matches_kallsyms_filter(struct map *map __maybe_unused,
					   struct symbol *sym)
{
	bool *visited = symbol__priv(sym);
	*visited = true;
	return 0;
}

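/*
 * Load the kernel symbol table twice, once from /proc/kallsyms and once from
 * a matching vmlinux file, then check that both agree on the symbols and
 * maps they contain.
 */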
static int test__vmlinux_matches_kallsyms(void)
{
	int err = -1;
	struct rb_node *nd;
	struct symbol *sym;
	struct map *kallsyms_map, *vmlinux_map;
	struct machine kallsyms, vmlinux;
	enum map_type type = MAP__FUNCTION;
	long page_size = sysconf(_SC_PAGE_SIZE);
	struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", };

	/*
	 * Step 1:
	 *
	 * Init the machines that will hold the kernel and the modules, obtained
	 * from both vmlinux + .ko files and from /proc/kallsyms split by module.
	 */
	machine__init(&kallsyms, "", HOST_KERNEL_ID);
	machine__init(&vmlinux, "", HOST_KERNEL_ID);

	/*
	 * Step 2:
	 *
	 * Create the kernel maps for kallsyms and the DSO where we will then
	 * load /proc/kallsyms. Also create the modules maps from /proc/modules
	 * and find the .ko files that match them in /lib/modules/`uname -r`/.
	 */
	if (machine__create_kernel_maps(&kallsyms) < 0) {
		pr_debug("machine__create_kernel_maps ");
		return -1;
	}

	/*
	 * Step 3:
	 *
	 * Load and split /proc/kallsyms into multiple maps, one per module.
	 */
	if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type, NULL) <= 0) {
		pr_debug("dso__load_kallsyms ");
		goto out;
	}

	/*
	 * Step 4:
	 *
	 * kallsyms will be internally sorted by name, on demand, so that we
	 * can find the reference relocation symbol, i.e. the symbol we will
	 * use to check if the running kernel was relocated, by seeing if it
	 * has the same value in the vmlinux file we load.
	 */
	kallsyms_map = machine__kernel_map(&kallsyms, type);

	sym = map__find_symbol_by_name(kallsyms_map, ref_reloc_sym.name, NULL);
	if (sym == NULL) {
		pr_debug("dso__find_symbol_by_name ");
		goto out;
	}

	ref_reloc_sym.addr = sym->start;

	/*
	 * Step 5:
	 *
	 * Now repeat step 2, this time for the vmlinux file we'll auto-locate.
	 */
	if (machine__create_kernel_maps(&vmlinux) < 0) {
		pr_debug("machine__create_kernel_maps ");
		goto out;
	}

	vmlinux_map = machine__kernel_map(&vmlinux, type);
	map__kmap(vmlinux_map)->ref_reloc_sym = &ref_reloc_sym;

	/*
	 * Step 6:
	 *
	 * Locate a vmlinux file in the vmlinux path that has a buildid that
	 * matches the one of the running kernel.
	 *
	 * While doing that, look for the ref reloc symbol; if we find it,
	 * we'll have its ref_reloc_symbol.unrelocated_addr, and then
	 * maps__reloc_vmlinux will notice and set the proper ->[un]map_ip
	 * routines to fix up the symbols.
	 */
	if (machine__load_vmlinux_path(&vmlinux, type,
				       vmlinux_matches_kallsyms_filter) <= 0) {
		pr_debug("machine__load_vmlinux_path ");
		goto out;
	}

	err = 0;
	/*
	 * Step 7:
	 *
	 * Now look at the symbols in the vmlinux DSO and check if we find all
	 * of them in the kallsyms dso. For the ones that are in both, check
	 * their names and end addresses too.
	 */
	for (nd = rb_first(&vmlinux_map->dso->symbols[type]); nd; nd = rb_next(nd)) {
		struct symbol *pair, *first_pair;
		bool backwards = true;

		sym  = rb_entry(nd, struct symbol, rb_node);

		if (sym->start == sym->end)
			continue;

		first_pair = machine__find_kernel_symbol(&kallsyms, type, sym->start, NULL, NULL);
		pair = first_pair;

		if (pair && pair->start == sym->start) {
next_pair:
			if (strcmp(sym->name, pair->name) == 0) {
				/*
				 * kallsyms doesn't have the symbol end, so we
				 * set it to the next symbol's start - 1. In
				 * some cases we get this up to a page wrong;
				 * trace_kmalloc, when I was developing this
				 * code, was one such example, 2106 bytes off
				 * the real size. More than that and we
				 * _really_ have a problem.
				 */
				s64 skew = sym->end - pair->end;
				if (llabs(skew) < page_size)
					continue;

				pr_debug("%#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n",
					 sym->start, sym->name, sym->end, pair->end);
			} else {
				struct rb_node *nnd;
detour:
				nnd = backwards ? rb_prev(&pair->rb_node) :
						  rb_next(&pair->rb_node);
				if (nnd) {
					struct symbol *next = rb_entry(nnd, struct symbol, rb_node);

					if (next->start == sym->start) {
						pair = next;
						goto next_pair;
					}
				}

				if (backwards) {
					backwards = false;
					pair = first_pair;
					goto detour;
				}

				pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
					 sym->start, sym->name, pair->name);
			}
		} else
			pr_debug("%#" PRIx64 ": %s not on kallsyms\n", sym->start, sym->name);

		err = -1;
	}

	if (!verbose)
		goto out;

	pr_info("Maps only in vmlinux:\n");

	for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node), *pair;
		/*
		 * If it is the kernel, kallsyms is always "[kernel.kallsyms]",
		 * while vmlinux will have the path of the vmlinux file being
		 * used, so use the short name, less descriptive but the same
		 * ("[kernel]") in both cases.
		 */
		pair = map_groups__find_by_name(&kallsyms.kmaps, type,
						(pos->dso->kernel ?
							pos->dso->short_name :
							pos->dso->name));
		if (pair)
			pair->priv = 1;
		else
			map__fprintf(pos, stderr);
	}

	pr_info("Maps in vmlinux with a different name in kallsyms:\n");

	for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node), *pair;

		pair = map_groups__find(&kallsyms.kmaps, type, pos->start);
		if (pair == NULL || pair->priv)
			continue;

		if (pair->start == pos->start) {
			pair->priv = 1;
			pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
				pos->start, pos->end, pos->pgoff, pos->dso->name);
			if (pos->pgoff != pair->pgoff || pos->end != pair->end)
				pr_info(": \n*%" PRIx64 "-%" PRIx64 " %" PRIx64 "",
					pair->start, pair->end, pair->pgoff);
			pr_info(" %s\n", pair->dso->name);
		}
	}

	pr_info("Maps only in kallsyms:\n");

	for (nd = rb_first(&kallsyms.kmaps.maps[type]);
	     nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);

		if (!pos->priv)
			map__fprintf(pos, stderr);
	}
out:
	return err;
}

#include "util/cpumap.h"
#include "util/evsel.h"
#include <sys/types.h>

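/*
 * Read the id of a syscall tracepoint from debugfs, e.g. from
 * <debugfs>/tracing/events/syscalls/sys_enter_open/id, returning -1 if it
 * can't be read.
 */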
static int trace_event__id(const char *evname)
{
	char *filename;
	int err = -1, fd;

	if (asprintf(&filename,
		     "%s/syscalls/%s/id",
		     tracing_events_path, evname) < 0)
		return -1;

	fd = open(filename, O_RDONLY);
	if (fd >= 0) {
		char id[16];
		if (read(fd, id, sizeof(id)) > 0)
			err = atoi(id);
		close(fd);
	}

	free(filename);
	return err;
}

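/*
 * Open a sys_enter_open tracepoint counter on just this thread, do a known
 * number of open(2) calls, then check that the counter saw exactly that
 * many.
 */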
static int test__open_syscall_event(void)
{
	int err = -1, fd;
	struct thread_map *threads;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	unsigned int nr_open_calls = 111, i;
	int id = trace_event__id("sys_enter_open");

	if (id < 0) {
		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
		return -1;
	}

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.config = id;
	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		goto out_thread_map_delete;
	}

	if (perf_evsel__open_per_thread(evsel, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror(errno));
		goto out_evsel_delete;
	}

	for (i = 0; i < nr_open_calls; ++i) {
		fd = open("/etc/passwd", O_RDONLY);
		close(fd);
	}

	if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {
		pr_debug("perf_evsel__read_on_cpu\n");
		goto out_close_fd;
	}

	if (evsel->counts->cpu[0].val != nr_open_calls) {
		pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n",
			 nr_open_calls, evsel->counts->cpu[0].val);
		goto out_close_fd;
	}
	err = 0;
out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__delete(threads);
	return err;
}

#include <sched.h>

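/*
 * Same as above, but with one counter fd per CPU: pin the thread to each
 * CPU in turn, do a distinct number of open(2) calls on each, then verify
 * the per-cpu counts.
 */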
static int test__open_syscall_event_on_all_cpus(void)
{
	int err = -1, fd, cpu;
	struct thread_map *threads;
	struct cpu_map *cpus;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	unsigned int nr_open_calls = 111, i;
	cpu_set_t cpu_set;
	int id = trace_event__id("sys_enter_open");

	if (id < 0) {
		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
		return -1;
	}

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_thread_map_delete;
	}

	CPU_ZERO(&cpu_set);

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.config = id;
	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		goto out_thread_map_delete;
	}

	if (perf_evsel__open(evsel, cpus, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror(errno));
		goto out_evsel_delete;
	}

	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int ncalls = nr_open_calls + cpu;
		/*
		 * XXX eventually lift this restriction in a way that
		 * keeps perf building on older glibc installations
		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
		 * a reasonable upper limit tho :-)
		 */
		if (cpus->map[cpu] >= CPU_SETSIZE) {
			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
			continue;
		}

		CPU_SET(cpus->map[cpu], &cpu_set);
		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
			pr_debug("sched_setaffinity() failed on CPU %d: %s ",
				 cpus->map[cpu],
				 strerror(errno));
			goto out_close_fd;
		}
		for (i = 0; i < ncalls; ++i) {
			fd = open("/etc/passwd", O_RDONLY);
			close(fd);
		}
		CPU_CLR(cpus->map[cpu], &cpu_set);
	}

	/*
	 * Here we need to explicitly preallocate the counts, as if we used
	 * the auto allocation it would allocate just for 1 cpu, since we
	 * start with cpu 0.
	 */
	if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) {
		pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
		goto out_close_fd;
	}

	err = 0;

	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int expected;

		if (cpus->map[cpu] >= CPU_SETSIZE)
			continue;

		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
			pr_debug("perf_evsel__read_on_cpu\n");
			err = -1;
			break;
		}

		expected = nr_open_calls + cpu;
		if (evsel->counts->cpu[cpu].val != expected) {
			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
				 expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);
			err = -1;
		}
	}

out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__delete(threads);
	return err;
}

/*
 * This test will generate random numbers of calls to some getpid syscalls,
 * then establish an mmap for a group of events that are created to monitor
 * the syscalls.
 *
 * It will receive the events, using mmap, and use the PERF_SAMPLE_ID
 * generated sample.id field to map each one back to its respective
 * perf_evsel instance.
 *
 * Then it checks if the number of syscalls reported as perf events by
 * the kernel corresponds to the number of syscalls made.
 */
static int test__basic_mmap(void)
{
	int err = -1;
	union perf_event *event;
	struct thread_map *threads;
	struct cpu_map *cpus;
	struct perf_evlist *evlist;
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_TRACEPOINT,
		.read_format	= PERF_FORMAT_ID,
		.sample_type	= PERF_SAMPLE_ID,
		.watermark	= 0,
	};
	cpu_set_t cpu_set;
	const char *syscall_names[] = { "getsid", "getppid", "getpgrp",
					"getpgid", };
	pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp,
				      (void *)getpgid };
#define nsyscalls ARRAY_SIZE(syscall_names)
	int ids[nsyscalls];
	unsigned int nr_events[nsyscalls],
		     expected_nr_events[nsyscalls], i, j;
	struct perf_evsel *evsels[nsyscalls], *evsel;

	for (i = 0; i < nsyscalls; ++i) {
		char name[64];

		snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
		ids[i] = trace_event__id(name);
		if (ids[i] < 0) {
			pr_debug("Is debugfs mounted on /sys/kernel/debug?\n");
			return -1;
		}
		nr_events[i] = 0;
		expected_nr_events[i] = random() % 257;
	}

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_free_threads;
	}

	CPU_ZERO(&cpu_set);
	CPU_SET(cpus->map[0], &cpu_set);
	if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
		pr_debug("sched_setaffinity() failed on CPU %d: %s ",
			 cpus->map[0], strerror(errno));
		goto out_free_cpus;
	}

	evlist = perf_evlist__new(cpus, threads);
	if (evlist == NULL) {
		pr_debug("perf_evlist__new\n");
		goto out_free_cpus;
	}

	/* anonymous union fields, can't be initialized above */
	attr.wakeup_events = 1;
	attr.sample_period = 1;

	for (i = 0; i < nsyscalls; ++i) {
		attr.config = ids[i];
		evsels[i] = perf_evsel__new(&attr, i);
		if (evsels[i] == NULL) {
			pr_debug("perf_evsel__new\n");
			goto out_free_evlist;
		}

		perf_evlist__add(evlist, evsels[i]);

		if (perf_evsel__open(evsels[i], cpus, threads) < 0) {
			pr_debug("failed to open counter: %s, "
				 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
				 strerror(errno));
			goto out_close_fd;
		}
	}

	if (perf_evlist__mmap(evlist, 128, true) < 0) {
		pr_debug("failed to mmap events: %d (%s)\n", errno,
			 strerror(errno));
		goto out_close_fd;
	}

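	/*
	 * Make the expected number of calls to each syscall; foo exists
	 * just to consume the otherwise ignored return value.
	 */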
	for (i = 0; i < nsyscalls; ++i)
		for (j = 0; j < expected_nr_events[i]; ++j) {
			int foo = syscalls[i]();
			++foo;
		}

	while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
		struct perf_sample sample;

		if (event->header.type != PERF_RECORD_SAMPLE) {
			pr_debug("unexpected %s event\n",
				 perf_event__name(event->header.type));
			goto out_munmap;
		}

		err = perf_evlist__parse_sample(evlist, event, &sample, false);
		if (err) {
			pr_err("Can't parse sample, err = %d\n", err);
			goto out_munmap;
		}

		evsel = perf_evlist__id2evsel(evlist, sample.id);
		if (evsel == NULL) {
			pr_debug("event with id %" PRIu64
				 " doesn't map to an evsel\n", sample.id);
			goto out_munmap;
		}
		nr_events[evsel->idx]++;
	}

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
			pr_debug("expected %d %s events, got %d\n",
				 expected_nr_events[evsel->idx],
				 perf_evsel__name(evsel), nr_events[evsel->idx]);
			goto out_munmap;
		}
	}

	err = 0;
out_munmap:
	perf_evlist__munmap(evlist);
out_close_fd:
	for (i = 0; i < nsyscalls; ++i)
		perf_evsel__close_fd(evsels[i], 1, threads->nr);
out_free_evlist:
	perf_evlist__delete(evlist);
out_free_cpus:
	cpu_map__delete(cpus);
out_free_threads:
	thread_map__delete(threads);
	return err;
#undef nsyscalls
}

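/*
 * Find the first CPU in pid's affinity mask and return it, also returning,
 * via maskp/sizep, a dynamically allocated cpu_set_t with only that CPU set,
 * ready to be passed to sched_setaffinity(). Returns -1 on failure.
 */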
static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t **maskp,
					 size_t *sizep)
{
	cpu_set_t *mask;
	size_t size;
	int i, cpu = -1, nrcpus = 1024;
realloc:
	mask = CPU_ALLOC(nrcpus);
	size = CPU_ALLOC_SIZE(nrcpus);
	CPU_ZERO_S(size, mask);

	if (sched_getaffinity(pid, size, mask) == -1) {
		CPU_FREE(mask);
		if (errno == EINVAL && nrcpus < (1024 << 8)) {
			nrcpus = nrcpus << 2;
			goto realloc;
		}
		perror("sched_getaffinity");
		return -1;
	}

	for (i = 0; i < nrcpus; i++) {
		if (CPU_ISSET_S(i, size, mask)) {
			if (cpu == -1) {
				cpu = i;
				*maskp = mask;
				*sizep = size;
			} else
				CPU_CLR_S(i, size, mask);
		}
	}

	if (cpu == -1)
		CPU_FREE(mask);

	return cpu;
}

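/*
 * Fork a "sleep 1" workload, record it with the default "cycles" event and
 * then check that the resulting PERF_RECORD_* stream is sane: timestamps
 * don't go backwards, cpu/pid/tid match the workload, and the expected
 * COMM/MMAP/EXIT events show up.
 */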
static int test__PERF_RECORD(void)
{
	struct perf_record_opts opts = {
		.target = {
			.uid = UINT_MAX,
			.uses_mmap = true,
		},
		.no_delay   = true,
		.freq	    = 10,
		.mmap_pages = 256,
	};
	cpu_set_t *cpu_mask = NULL;
	size_t cpu_mask_size = 0;
	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
	struct perf_evsel *evsel;
	struct perf_sample sample;
	const char *cmd = "sleep";
	const char *argv[] = { cmd, "1", NULL, };
	char *bname;
	u64 prev_time = 0;
	bool found_cmd_mmap = false,
	     found_libc_mmap = false,
	     found_vdso_mmap = false,
	     found_ld_mmap = false;
	int err = -1, errs = 0, i, wakeups = 0;
	u32 cpu;
	int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };

	if (evlist == NULL || argv == NULL) {
		pr_debug("Not enough memory to create evlist\n");
		goto out;
	}

	/*
	 * We need at least one evsel in the evlist, use the default
	 * one: "cycles".
	 */
	err = perf_evlist__add_default(evlist);
	if (err < 0) {
		pr_debug("Not enough memory to create evsel\n");
		goto out_delete_evlist;
	}

	/*
	 * Create maps of threads and cpus to monitor. In this case
	 * we start with all threads and cpus (-1, -1) but then in
	 * perf_evlist__prepare_workload we'll fill in the only thread
	 * we're monitoring, the one forked there.
	 */
	err = perf_evlist__create_maps(evlist, &opts.target);
	if (err < 0) {
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_delete_evlist;
	}

	/*
	 * Prepare the workload in argv[] to run, it'll fork it, and then wait
	 * for perf_evlist__start_workload() to exec it. This is done this way
	 * so that we have time to open the evlist (calling sys_perf_event_open
	 * on all the fds) and then mmap them.
	 */
	err = perf_evlist__prepare_workload(evlist, &opts, argv);
	if (err < 0) {
		pr_debug("Couldn't run the workload!\n");
		goto out_delete_evlist;
	}

	/*
	 * Config the evsels, setting attr->comm on the first one, etc.
	 */
	evsel = perf_evlist__first(evlist);
	evsel->attr.sample_type |= PERF_SAMPLE_CPU;
	evsel->attr.sample_type |= PERF_SAMPLE_TID;
	evsel->attr.sample_type |= PERF_SAMPLE_TIME;
	perf_evlist__config_attrs(evlist, &opts);

	err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask,
					    &cpu_mask_size);
	if (err < 0) {
		pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	cpu = err;

	/*
	 * So that we can check perf_sample.cpu on all the samples.
	 */
	if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, cpu_mask) < 0) {
		pr_debug("sched_setaffinity: %s\n", strerror(errno));
		goto out_free_cpu_mask;
	}

	/*
	 * Call sys_perf_event_open on all the fds on all the evsels,
	 * grouping them if asked to.
	 */
	err = perf_evlist__open(evlist);
	if (err < 0) {
		pr_debug("perf_evlist__open: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	/*
	 * mmap the first fd on a given CPU and ask for events for the other
	 * fds in the same CPU to be injected in the same mmap ring buffer
	 * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
	 */
	err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
	if (err < 0) {
		pr_debug("perf_evlist__mmap: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	/*
	 * Now that all is properly set up, enable the events, they will
	 * count just on workload.pid, which will start...
	 */
	perf_evlist__enable(evlist);

	/*
	 * Now!
	 */
	perf_evlist__start_workload(evlist);

	while (1) {
		int before = total_events;

		for (i = 0; i < evlist->nr_mmaps; i++) {
			union perf_event *event;

			while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
				const u32 type = event->header.type;
				const char *name = perf_event__name(type);

				++total_events;
				if (type < PERF_RECORD_MAX)
					nr_events[type]++;

				err = perf_evlist__parse_sample(evlist, event, &sample, false);
				if (err < 0) {
					if (verbose)
						perf_event__fprintf(event, stderr);
					pr_debug("Couldn't parse sample\n");
					goto out_err;
				}

				if (verbose) {
					pr_info("%" PRIu64" %d ", sample.time, sample.cpu);
					perf_event__fprintf(event, stderr);
				}

				if (prev_time > sample.time) {
					pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n",
						 name, prev_time, sample.time);
					++errs;
				}

				prev_time = sample.time;

				if (sample.cpu != cpu) {
					pr_debug("%s with unexpected cpu, expected %d, got %d\n",
						 name, cpu, sample.cpu);
					++errs;
				}

				if ((pid_t)sample.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.pid);
					++errs;
				}

				if ((pid_t)sample.tid != evlist->workload.pid) {
					pr_debug("%s with unexpected tid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.tid);
					++errs;
				}

				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP ||
				     type == PERF_RECORD_FORK ||
				     type == PERF_RECORD_EXIT) &&
				     (pid_t)event->comm.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid/tid\n", name);
					++errs;
				}

				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP) &&
				     event->comm.pid != event->comm.tid) {
					pr_debug("%s with different pid/tid!\n", name);
					++errs;
				}

				switch (type) {
				case PERF_RECORD_COMM:
					if (strcmp(event->comm.comm, cmd)) {
						pr_debug("%s with unexpected comm!\n", name);
						++errs;
					}
					break;
				case PERF_RECORD_EXIT:
					goto found_exit;
				case PERF_RECORD_MMAP:
					bname = strrchr(event->mmap.filename, '/');
					if (bname != NULL) {
						if (!found_cmd_mmap)
							found_cmd_mmap = !strcmp(bname + 1, cmd);
						if (!found_libc_mmap)
							found_libc_mmap = !strncmp(bname + 1, "libc", 4);
						if (!found_ld_mmap)
							found_ld_mmap = !strncmp(bname + 1, "ld", 2);
					} else if (!found_vdso_mmap)
						found_vdso_mmap = !strcmp(event->mmap.filename, "[vdso]");
					break;

				case PERF_RECORD_SAMPLE:
					/* Just ignore samples for now */
					break;
				default:
					pr_debug("Unexpected perf_event->header.type %d!\n",
						 type);
					++errs;
				}
			}
		}

		/*
		 * We don't use poll here because, at least as of 3.1, the
		 * PERF_RECORD_{!SAMPLE} events don't honour
		 * perf_event_attr.wakeup_events, just PERF_RECORD_SAMPLE does.
		 */
		if (total_events == before && false)
			poll(evlist->pollfd, evlist->nr_fds, -1);

		sleep(1);
		if (++wakeups > 5) {
			pr_debug("No PERF_RECORD_EXIT event!\n");
			break;
		}
	}

found_exit:
	if (nr_events[PERF_RECORD_COMM] > 1) {
		pr_debug("Excessive number of PERF_RECORD_COMM events!\n");
		++errs;
	}

	if (nr_events[PERF_RECORD_COMM] == 0) {
		pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd);
		++errs;
	}

	if (!found_cmd_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd);
		++errs;
	}

	if (!found_libc_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc");
		++errs;
	}

	if (!found_ld_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld");
		++errs;
	}

	if (!found_vdso_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]");
		++errs;
	}
out_err:
	perf_evlist__munmap(evlist);
out_free_cpu_mask:
	CPU_FREE(cpu_mask);
out_delete_evlist:
	perf_evlist__delete(evlist);
out:
	return (err < 0 || errs > 0) ? -1 : 0;
}

#if defined(__x86_64__) || defined(__i386__)

#define barrier() asm volatile("" ::: "memory")

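/*
 * Raw wrappers for the rdpmc/rdtsc instructions, each assembling the
 * 64-bit result from the edx:eax register pair.
 */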
static u64 rdpmc(unsigned int counter)
{
	unsigned int low, high;

	asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));

	return low | ((u64)high) << 32;
}

static u64 rdtsc(void)
{
	unsigned int low, high;

	asm volatile("rdtsc" : "=a" (low), "=d" (high));

	return low | ((u64)high) << 32;
}

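/*
 * Self-monitoring read via the mmapped perf_event_mmap_page: take a
 * seqlock-style consistent snapshot, read the counter with rdpmc and, if
 * the event was time multiplexed (enabled != running), scale the count
 * using the rdtsc based time delta.
 */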
static u64 mmap_read_self(void *addr)
{
	struct perf_event_mmap_page *pc = addr;
	u32 seq, idx, time_mult = 0, time_shift = 0;
	u64 count, cyc = 0, time_offset = 0, enabled, running, delta;

	do {
		seq = pc->lock;
		barrier();

		enabled = pc->time_enabled;
		running = pc->time_running;

		if (enabled != running) {
			cyc = rdtsc();
			time_mult = pc->time_mult;
			time_shift = pc->time_shift;
			time_offset = pc->time_offset;
		}

		idx = pc->index;
		count = pc->offset;
		if (idx)
			count += rdpmc(idx - 1);

		barrier();
	} while (pc->lock != seq);

	if (enabled != running) {
		u64 quot, rem;

		quot = (cyc >> time_shift);
		rem = cyc & ((1 << time_shift) - 1);
		delta = time_offset + quot * time_mult +
			((rem * time_mult) >> time_shift);

		enabled += delta;
		if (idx)
			running += delta;

		quot = count / running;
		rem = count % running;
		count = quot * enabled + (rem * enabled) / running;
	}

	return count;
}

/*
 * If the RDPMC instruction faults then signal this back to the test parent task:
 */
static void segfault_handler(int sig __maybe_unused,
			     siginfo_t *info __maybe_unused,
			     void *uc __maybe_unused)
{
	exit(-1);
}

static int __test__rdpmc(void)
{
	long page_size = sysconf(_SC_PAGE_SIZE);
	volatile int tmp = 0;
	u64 i, loops = 1000;
	int n;
	int fd;
	void *addr;
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_INSTRUCTIONS,
		.exclude_kernel = 1,
	};
	u64 delta_sum = 0;
	struct sigaction sa;

	sigfillset(&sa.sa_mask);
	sa.sa_sigaction = segfault_handler;
	sigaction(SIGSEGV, &sa, NULL);

	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		pr_err("Error: sys_perf_event_open() syscall returned "
		       "with %d (%s)\n", fd, strerror(errno));
		return -1;
	}

	addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
	if (addr == (void *)(-1)) {
		pr_err("Error: mmap() syscall returned with (%s)\n",
		       strerror(errno));
		goto out_close;
	}

	for (n = 0; n < 6; n++) {
		u64 stamp, now, delta;

		stamp = mmap_read_self(addr);

		for (i = 0; i < loops; i++)
			tmp++;

		now = mmap_read_self(addr);
		loops *= 10;

		delta = now - stamp;
		pr_debug("%14d: %14Lu\n", n, (unsigned long long)delta);

		delta_sum += delta;
	}

	munmap(addr, page_size);
	pr_debug("   ");
out_close:
	close(fd);

	if (!delta_sum)
		return -1;

	return 0;
}

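/*
 * Run __test__rdpmc() in a forked child so that a SIGSEGV (e.g. when rdpmc
 * is not allowed from userspace) kills just the child; the child's exit
 * status is the test result.
 */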
static int test__rdpmc(void)
{
	int status = 0;
	int wret = 0;
	int ret;
	int pid;

	pid = fork();
	if (pid < 0)
		return -1;

	if (!pid) {
		ret = __test__rdpmc();

		exit(ret);
	}

	wret = waitpid(pid, &status, 0);
	if (wret < 0 || status)
		return -1;

	return 0;
}

#endif

static int test__perf_pmu(void)
{
	return perf_pmu__test();
}

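/*
 * Synthesize the name of every valid hw cache event, parse them all into an
 * evlist, then walk the resulting evsels checking that each name round-trips
 * back to the one we generated.
 */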
static int perf_evsel__roundtrip_cache_name_test(void)
{
	char name[128];
	int type, op, err = 0, ret = 0, i, idx;
	struct perf_evsel *evsel;
	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);

	if (evlist == NULL)
		return -ENOMEM;

	for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
		for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
			/* skip invalid cache type */
			if (!perf_evsel__is_cache_op_valid(type, op))
				continue;

			for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
				__perf_evsel__hw_cache_type_op_res_name(type, op, i,
									name, sizeof(name));
				err = parse_events(evlist, name, 0);
				if (err)
					ret = err;
			}
		}
	}

	idx = 0;
	evsel = perf_evlist__first(evlist);

	for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
		for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
			/* skip invalid cache type */
			if (!perf_evsel__is_cache_op_valid(type, op))
				continue;

			for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
				__perf_evsel__hw_cache_type_op_res_name(type, op, i,
									name, sizeof(name));
				if (evsel->idx != idx)
					continue;

				++idx;

				if (strcmp(perf_evsel__name(evsel), name)) {
					pr_debug("%s != %s\n", perf_evsel__name(evsel), name);
					ret = -1;
				}

				evsel = perf_evsel__next(evsel);
			}
		}
	}

	perf_evlist__delete(evlist);
	return ret;
}

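/*
 * Parse each name in the given array into an evlist and check that the
 * resulting evsels, indexed by evsel->idx, round-trip to the same names.
 */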
static int __perf_evsel__name_array_test(const char *names[], int nr_names)
{
	int i, err;
	struct perf_evsel *evsel;
	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);

	if (evlist == NULL)
		return -ENOMEM;

	for (i = 0; i < nr_names; ++i) {
		err = parse_events(evlist, names[i], 0);
		if (err) {
			pr_debug("failed to parse event '%s', err %d\n",
				 names[i], err);
			goto out_delete_evlist;
		}
	}

	err = 0;
	list_for_each_entry(evsel, &evlist->entries, node) {
		if (strcmp(perf_evsel__name(evsel), names[evsel->idx])) {
			--err;
			pr_debug("%s != %s\n", perf_evsel__name(evsel), names[evsel->idx]);
		}
	}

out_delete_evlist:
	perf_evlist__delete(evlist);
	return err;
}

#define perf_evsel__name_array_test(names) \
	__perf_evsel__name_array_test(names, ARRAY_SIZE(names))

static int perf_evsel__roundtrip_name_test(void)
{
	int err = 0, ret = 0;

	err = perf_evsel__name_array_test(perf_evsel__hw_names);
	if (err)
		ret = err;

	err = perf_evsel__name_array_test(perf_evsel__sw_names);
	if (err)
		ret = err;

	err = perf_evsel__roundtrip_cache_name_test();
	if (err)
		ret = err;

	return ret;
}

static struct test {
	const char *desc;
	int (*func)(void);
} tests[] = {
	{
		.desc = "vmlinux symtab matches kallsyms",
		.func = test__vmlinux_matches_kallsyms,
	},
	{
		.desc = "detect open syscall event",
		.func = test__open_syscall_event,
	},
	{
		.desc = "detect open syscall event on all cpus",
		.func = test__open_syscall_event_on_all_cpus,
	},
	{
		.desc = "read samples using the mmap interface",
		.func = test__basic_mmap,
	},
	{
		.desc = "parse events tests",
		.func = parse_events__test,
	},
#if defined(__x86_64__) || defined(__i386__)
	{
		.desc = "x86 rdpmc test",
		.func = test__rdpmc,
	},
#endif
	{
		.desc = "Validate PERF_RECORD_* events & perf_sample fields",
		.func = test__PERF_RECORD,
	},
	{
		.desc = "Test perf pmu format parsing",
		.func = test__perf_pmu,
	},
	{
		.desc = "Test dso data interface",
		.func = dso__test_data,
	},
	{
		.desc = "roundtrip evsel->name check",
		.func = perf_evsel__roundtrip_name_test,
	},
	{
		.func = NULL,
	},
};

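/*
 * Check if test 'curr' (0-based) was selected on the command line, either
 * by its 1-based number or by a substring of its description. With no
 * arguments every test matches.
 */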
static bool perf_test__matches(int curr, int argc, const char *argv[])
{
	int i;

	if (argc == 0)
		return true;

	for (i = 0; i < argc; ++i) {
		char *end;
		long nr = strtoul(argv[i], &end, 10);

		if (*end == '\0') {
			if (nr == curr + 1)
				return true;
			continue;
		}

		if (strstr(tests[curr].desc, argv[i]))
			return true;
	}

	return false;
}

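/* Run all the tests selected on the command line, reporting Ok/FAILED. */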
static int __cmd_test(int argc, const char *argv[])
{
	int i = 0;

	while (tests[i].func) {
		int curr = i++, err;

		if (!perf_test__matches(curr, argc, argv))
			continue;

		pr_info("%2d: %s:", i, tests[curr].desc);
		pr_debug("\n--- start ---\n");
		err = tests[curr].func();
		pr_debug("---- end ----\n%s:", tests[curr].desc);
		pr_info(" %s\n", err ? "FAILED!\n" : "Ok");
	}

	return 0;
}

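/* List the available tests, optionally filtered by a description substring. */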
static int perf_test__list(int argc, const char **argv)
{
	int i = 0;

	while (tests[i].func) {
		int curr = i++;

		if (argc > 1 && !strstr(tests[curr].desc, argv[1]))
			continue;

		pr_info("%2d: %s\n", i, tests[curr].desc);
	}

	return 0;
}

int cmd_test(int argc, const char **argv, const char *prefix __maybe_unused)
{
	const char * const test_usage[] = {
	"perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]",
	NULL,
	};
	const struct option test_options[] = {
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_END()
	};

	argc = parse_options(argc, argv, test_options, test_usage, 0);
	if (argc >= 1 && !strcmp(argv[0], "list"))
		return perf_test__list(argc, argv);

	symbol_conf.priv_size = sizeof(int);
	symbol_conf.sort_by_name = true;
	symbol_conf.try_vmlinux_path = true;

	if (symbol__init() < 0)
		return -1;

	return __cmd_test(argc, argv);
}