// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/string.h>

/* For the CLR_() macros */
#include <pthread.h>

#include <sched.h>
#include <perf/mmap.h>
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include "record.h"
#include "tests.h"
#include "util/mmap.h"

/*
 * Find the first CPU in @pid's affinity mask, clear every other bit in
 * *maskp so the mask ends up containing only that CPU, and return its
 * index.  Returns -1 (after perror() output) if sched_getaffinity()
 * fails for a reason other than a retryable EINVAL.
 */
static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t *maskp)
{
	int i, cpu = -1, nrcpus = 1024;
realloc:
	CPU_ZERO(maskp);

	if (sched_getaffinity(pid, sizeof(*maskp), maskp) == -1) {
		if (errno == EINVAL && nrcpus < (1024 << 8)) {
			/*
			 * NOTE(review): only the retry counter grows here;
			 * the mask passed to the kernel stays at
			 * sizeof(cpu_set_t), so the retry cannot actually
			 * enlarge the buffer — confirm whether a dynamically
			 * sized mask (CPU_ALLOC) was intended.
			 */
			nrcpus = nrcpus << 2;
			goto realloc;
		}
		perror("sched_getaffinity");
		return -1; /* was mis-indented as if nested under the inner if */
	}

	/* Keep the lowest set CPU, clear all the others from the mask. */
	for (i = 0; i < nrcpus; i++) {
		if (CPU_ISSET(i, maskp)) {
			if (cpu == -1)
				cpu = i;
			else
				CPU_CLR(i, maskp);
		}
	}

	return cpu;
}
/*
 * Fork a "sleep 1" workload, record it with a dummy (or, as fallback,
 * default) event pinned to a single CPU, then validate the resulting
 * PERF_RECORD_* stream: timestamps must not go backwards, every record's
 * cpu/pid/tid must match the pinned workload, and the expected
 * COMM/MMAP/EXIT records must show up.  Returns 0 on success, -1 on any
 * setup error or validation failure.
 */
int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	struct record_opts opts = {
		.target = {
			.uid = UINT_MAX,
			.uses_mmap = true,
		},
		.no_buffering = true,
		.mmap_pages   = 256,
	};
	cpu_set_t cpu_mask;
	size_t cpu_mask_size = sizeof(cpu_mask);
	struct evlist *evlist = perf_evlist__new_dummy();
	struct evsel *evsel;
	struct perf_sample sample;
	const char *cmd = "sleep";
	const char *argv[] = { cmd, "1", NULL, };
	char *bname, *mmap_filename;
	u64 prev_time = 0;
	bool found_cmd_mmap = false,
	     found_coreutils_mmap = false,
	     found_libc_mmap = false,
	     found_vdso_mmap = false,
	     found_ld_mmap = false;
	int err = -1, errs = 0, i, wakeups = 0;
	u32 cpu;
	int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };
	char sbuf[STRERR_BUFSIZE];

	if (evlist == NULL) /* Fallback for kernels lacking PERF_COUNT_SW_DUMMY */
		evlist = perf_evlist__new_default();

	if (evlist == NULL) {
		pr_debug("Not enough memory to create evlist\n");
		goto out;
	}

	/*
	 * Create maps of threads and cpus to monitor. In this case
	 * we start with all threads and cpus (-1, -1) but then in
	 * perf_evlist__prepare_workload we'll fill in the only thread
	 * we're monitoring, the one forked there.
	 */
	err = perf_evlist__create_maps(evlist, &opts.target);
	if (err < 0) {
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_delete_evlist;
	}

	/*
	 * Prepare the workload in argv[] to run, it'll fork it, and then wait
	 * for perf_evlist__start_workload() to exec it. This is done this way
	 * so that we have time to open the evlist (calling sys_perf_event_open
	 * on all the fds) and then mmap them.
	 */
	err = perf_evlist__prepare_workload(evlist, &opts.target, argv, false, NULL);
	if (err < 0) {
		pr_debug("Couldn't run the workload!\n");
		goto out_delete_evlist;
	}

	/*
	 * Config the evsels, setting attr->comm on the first one, etc.
	 */
	evsel = evlist__first(evlist);
	perf_evsel__set_sample_bit(evsel, CPU);
	perf_evsel__set_sample_bit(evsel, TID);
	perf_evsel__set_sample_bit(evsel, TIME);
	perf_evlist__config(evlist, &opts, NULL);

	err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);
	if (err < 0) {
		pr_debug("sched__get_first_possible_cpu: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	cpu = err;

	/*
	 * So that we can check perf_sample.cpu on all the samples.
	 */
	if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) {
		pr_debug("sched_setaffinity: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	/*
	 * Call sys_perf_event_open on all the fds on all the evsels,
	 * grouping them if asked to.
	 */
	err = evlist__open(evlist);
	if (err < 0) {
		pr_debug("perf_evlist__open: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	/*
	 * mmap the first fd on a given CPU and ask for events for the other
	 * fds in the same CPU to be injected in the same mmap ring buffer
	 * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
	 */
	err = evlist__mmap(evlist, opts.mmap_pages);
	if (err < 0) {
		pr_debug("evlist__mmap: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	/*
	 * Now that all is properly set up, enable the events, they will
	 * count just on workload.pid, which will start...
	 */
	evlist__enable(evlist);

	/*
	 * Now!
	 */
	perf_evlist__start_workload(evlist);

	while (1) {
		int before = total_events;

		/* Drain every mmap ring buffer, validating each record. */
		for (i = 0; i < evlist->core.nr_mmaps; i++) {
			union perf_event *event;
			struct mmap *md;

			md = &evlist->mmap[i];
			if (perf_mmap__read_init(&md->core) < 0)
				continue;

			while ((event = perf_mmap__read_event(md)) != NULL) {
				const u32 type = event->header.type;
				const char *name = perf_event__name(type);

				++total_events;
				if (type < PERF_RECORD_MAX)
					nr_events[type]++;

				err = perf_evlist__parse_sample(evlist, event, &sample);
				if (err < 0) {
					if (verbose > 0)
						perf_event__fprintf(event, stderr);
					pr_debug("Couldn't parse sample\n");
					goto out_delete_evlist;
				}

				if (verbose > 0) {
					pr_info("%" PRIu64" %d ", sample.time, sample.cpu);
					perf_event__fprintf(event, stderr);
				}

				/* Timestamps must be monotonically non-decreasing. */
				if (prev_time > sample.time) {
					pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n",
						 name, prev_time, sample.time);
					++errs;
				}

				prev_time = sample.time;

				/* The workload was pinned to a single CPU above. */
				if (sample.cpu != cpu) {
					pr_debug("%s with unexpected cpu, expected %d, got %d\n",
						 name, cpu, sample.cpu);
					++errs;
				}

				if ((pid_t)sample.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.pid);
					++errs;
				}

				if ((pid_t)sample.tid != evlist->workload.pid) {
					pr_debug("%s with unexpected tid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.tid);
					++errs;
				}

				/*
				 * These record types all start with the same
				 * pid/tid layout, so event->comm.pid is valid
				 * for every one of them.
				 */
				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP ||
				     type == PERF_RECORD_MMAP2 ||
				     type == PERF_RECORD_FORK ||
				     type == PERF_RECORD_EXIT) &&
				     (pid_t)event->comm.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid/tid\n", name);
					++errs;
				}

				/* A single-threaded workload: pid must equal tid. */
				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP ||
				     type == PERF_RECORD_MMAP2) &&
				     event->comm.pid != event->comm.tid) {
					pr_debug("%s with different pid/tid!\n", name);
					++errs;
				}

				switch (type) {
				case PERF_RECORD_COMM:
					if (strcmp(event->comm.comm, cmd)) {
						pr_debug("%s with unexpected comm!\n", name);
						++errs;
					}
					break;
				case PERF_RECORD_EXIT:
					goto found_exit;
				case PERF_RECORD_MMAP:
					mmap_filename = event->mmap.filename;
					goto check_bname;
				case PERF_RECORD_MMAP2:
					mmap_filename = event->mmap2.filename;
				check_bname:
					/* Match on the basename of the mapped file. */
					bname = strrchr(mmap_filename, '/');
					if (bname != NULL) {
						if (!found_cmd_mmap)
							found_cmd_mmap = !strcmp(bname + 1, cmd);
						if (!found_coreutils_mmap)
							found_coreutils_mmap = !strcmp(bname + 1, "coreutils");
						if (!found_libc_mmap)
							found_libc_mmap = !strncmp(bname + 1, "libc", 4);
						if (!found_ld_mmap)
							found_ld_mmap = !strncmp(bname + 1, "ld", 2);
					} else if (!found_vdso_mmap)
						found_vdso_mmap = !strcmp(mmap_filename, "[vdso]");
					break;

				case PERF_RECORD_SAMPLE:
					/* Just ignore samples for now */
					break;
				default:
					pr_debug("Unexpected perf_event->header.type %d!\n",
						 type);
					++errs;
				}

				perf_mmap__consume(&md->core);
			}
			perf_mmap__read_done(md);
		}

		/*
		 * We don't use poll here because at least at 3.1 times the
		 * PERF_RECORD_{!SAMPLE} events don't honour
		 * perf_event_attr.wakeup_events, just PERF_EVENT_SAMPLE does.
		 */
		if (total_events == before && false)
			evlist__poll(evlist, -1);

		sleep(1);
		if (++wakeups > 5) {
			pr_debug("No PERF_RECORD_EXIT event!\n");
			break;
		}
	}

found_exit:
	/* One COMM for the exec, plus possibly one for a coreutils multicall. */
	if (nr_events[PERF_RECORD_COMM] > 1 + !!found_coreutils_mmap) {
		pr_debug("Excessive number of PERF_RECORD_COMM events!\n");
		++errs;
	}

	if (nr_events[PERF_RECORD_COMM] == 0) {
		pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd);
		++errs;
	}

	if (!found_cmd_mmap && !found_coreutils_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd);
		++errs;
	}

	if (!found_libc_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc");
		++errs;
	}

	if (!found_ld_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld");
		++errs;
	}

	if (!found_vdso_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]");
		++errs;
	}
out_delete_evlist:
	evlist__delete(evlist);
out:
	return (err < 0 || errs > 0) ? -1 : 0;
}