mmap-basic.c
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
/* For the CLR_() macros */
#include <pthread.h>

#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "cpumap.h"
#include "tests.h"
#include <linux/err.h>
#include <linux/kernel.h>

/*
 * This test generates a random number of calls to each of a few get*id
 * syscalls (getsid, getppid, getpgid), after setting up an mmap'ed ring
 * buffer for a set of events created to monitor those syscalls.
 *
 * It then reads the events back through the mmap and uses the
 * PERF_SAMPLE_ID-generated sample.id field to map each sample back to its
 * perf_evsel instance.
 *
 * Finally it checks that the number of syscalls reported as perf events by
 * the kernel matches the number of syscalls actually made.
 */
int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	int err = -1;
	union perf_event *event;
	struct thread_map *threads;
	struct cpu_map *cpus;
	struct perf_evlist *evlist;
	cpu_set_t cpu_set;
	const char *syscall_names[] = { "getsid", "getppid", "getpgid", };
	pid_t (*syscalls[])(void) = { (void *)getsid, getppid, (void *)getpgid };
#define nsyscalls ARRAY_SIZE(syscall_names)
	unsigned int nr_events[nsyscalls],
		     expected_nr_events[nsyscalls], i, j;
	struct perf_evsel *evsels[nsyscalls], *evsel;
	char sbuf[STRERR_BUFSIZE];
	struct perf_mmap *md;

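	/*
	 * Monitor just this process: a thread map with only our own pid and
	 * a cpu map covering the CPUs that are currently online.
	 */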
	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_free_threads;
	}

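	/*
	 * Pin ourselves to the first CPU in the map so that all the syscalls
	 * below end up in a single per-cpu ring buffer (evlist->mmap[0]).
	 */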
	CPU_ZERO(&cpu_set);
	CPU_SET(cpus->map[0], &cpu_set);
	if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
		pr_debug("sched_setaffinity() failed on CPU %d: %s\n",
			 cpus->map[0], str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_free_cpus;
	}

	evlist = perf_evlist__new();
	if (evlist == NULL) {
		pr_debug("perf_evlist__new\n");
		goto out_free_cpus;
	}

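	/* Use the thread and cpu maps above for every event in the evlist. */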
	perf_evlist__set_maps(evlist, cpus, threads);

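	/*
	 * Create one tracepoint evsel per syscall (syscalls:sys_enter_*),
	 * add it to the evlist, open it and pick a random number of calls
	 * to expect for it.
	 */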
	for (i = 0; i < nsyscalls; ++i) {
		char name[64];

		snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
		evsels[i] = perf_evsel__newtp("syscalls", name);
		if (IS_ERR(evsels[i])) {
			pr_debug("perf_evsel__newtp(%s)\n", name);
			goto out_delete_evlist;
		}

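		/*
		 * Wake up the reader after every event and have each sample
		 * carry an ID so it can be matched back to its evsel.
		 */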
		evsels[i]->attr.wakeup_events = 1;
		perf_evsel__set_sample_id(evsels[i], false);

		perf_evlist__add(evlist, evsels[i]);

		if (perf_evsel__open(evsels[i], cpus, threads) < 0) {
			pr_debug("failed to open counter: %s, "
				 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
				 str_error_r(errno, sbuf, sizeof(sbuf)));
			goto out_delete_evlist;
		}

		nr_events[i] = 0;
		expected_nr_events[i] = 1 + rand() % 127;
	}

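	/* Map the ring buffers so the samples can be read back below. */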
	if (perf_evlist__mmap(evlist, 128) < 0) {
		pr_debug("failed to mmap events: %d (%s)\n", errno,
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

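	/*
	 * Now make the expected number of calls to each syscall; the return
	 * value is only stored and incremented to keep the compiler quiet.
	 */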
	for (i = 0; i < nsyscalls; ++i)
		for (j = 0; j < expected_nr_events[i]; ++j) {
			int foo = syscalls[i]();
			++foo;
		}

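	/*
	 * Since we pinned ourselves to cpus->map[0], all the samples should
	 * be sitting in the first ring buffer.
	 */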
	md = &evlist->mmap[0];
	if (perf_mmap__read_init(md) < 0)
		goto out_init;

	while ((event = perf_mmap__read_event(md)) != NULL) {
		struct perf_sample sample;

		if (event->header.type != PERF_RECORD_SAMPLE) {
			pr_debug("unexpected %s event\n",
				 perf_event__name(event->header.type));
			goto out_delete_evlist;
		}

		err = perf_evlist__parse_sample(evlist, event, &sample);
		if (err) {
			pr_err("Can't parse sample, err = %d\n", err);
			goto out_delete_evlist;
		}

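		/*
		 * sample.id is there because of PERF_SAMPLE_ID; use it to find
		 * the evsel this sample belongs to and count it.
		 */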
		err = -1;
		evsel = perf_evlist__id2evsel(evlist, sample.id);
		if (evsel == NULL) {
			pr_debug("event with id %" PRIu64
				 " doesn't map to an evsel\n", sample.id);
			goto out_delete_evlist;
		}
		nr_events[evsel->idx]++;
		perf_mmap__consume(md);
	}
	perf_mmap__read_done(md);

out_init:
	err = 0;
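	/* Check that each evsel saw exactly as many samples as calls made. */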
	evlist__for_each_entry(evlist, evsel) {
		if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
			pr_debug("expected %d %s events, got %d\n",
				 expected_nr_events[evsel->idx],
				 perf_evsel__name(evsel), nr_events[evsel->idx]);
			err = -1;
			goto out_delete_evlist;
		}
	}

out_delete_evlist:
	perf_evlist__delete(evlist);
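	/*
	 * The maps were handed to the evlist above; clear the local pointers
	 * so the puts below turn into no-ops.
	 */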
	cpus	= NULL;
	threads = NULL;
out_free_cpus:
	cpu_map__put(cpus);
out_free_threads:
	thread_map__put(threads);
	return err;
}