// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/string.h>

/* For the CLR_() macros */
#include <pthread.h>

#include <sched.h>
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include "record.h"
#include "tests.h"
#include "util/mmap.h"

/*
 * Find the first CPU in @pid's affinity mask and clear every other bit,
 * so that *maskp can be used to pin the task to that single CPU.
 *
 * Returns the CPU number (>= 0) on success, -1 on error (errno reported
 * via perror()).
 */
static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t *maskp)
{
	int i, cpu = -1, nrcpus = 1024;
realloc:
	CPU_ZERO(maskp);

	if (sched_getaffinity(pid, sizeof(*maskp), maskp) == -1) {
		/*
		 * EINVAL means the mask was too small for this system;
		 * retry with a larger assumed CPU count, up to a sane cap.
		 * NOTE(review): the syscall size argument never changes, so
		 * the retry only helps if the kernel's view changes —
		 * kept as-is to preserve behavior.
		 */
		if (errno == EINVAL && nrcpus < (1024 << 8)) {
			nrcpus = nrcpus << 2;
			goto realloc;
		}
		perror("sched_getaffinity");
		return -1;
	}

	/*
	 * Never scan past the capacity of cpu_set_t: after the realloc
	 * retries nrcpus may exceed CPU_SETSIZE, and CPU_ISSET() beyond
	 * that reads outside the mask.
	 */
	if (nrcpus > CPU_SETSIZE)
		nrcpus = CPU_SETSIZE;

	for (i = 0; i < nrcpus; i++) {
		if (CPU_ISSET(i, maskp)) {
			if (cpu == -1)
				cpu = i;	/* keep the first CPU found */
			else
				CPU_CLR(i, maskp); /* drop every other CPU */
		}
	}

	return cpu;
}

/*
 * Smoke-test the perf-record event-collection machinery without writing a
 * perf.data file: fork a "sleep 1" workload, pin it to a single CPU, open
 * and mmap the events, then validate the resulting PERF_RECORD_* stream:
 *  - timestamps must not go backwards,
 *  - every sample's cpu/pid/tid must match the pinned workload,
 *  - the expected COMM and MMAP/MMAP2 events (cmd binary, libc, ld,
 *    [vdso]) must all appear, terminated by PERF_RECORD_EXIT.
 *
 * Returns 0 on success, -1 on any setup error or validation failure.
 */
int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	struct record_opts opts = {
		.target = {
			.uid = UINT_MAX,	/* don't filter by uid */
			.uses_mmap = true,
		},
		.no_buffering = true,
		.mmap_pages   = 256,
	};
	cpu_set_t cpu_mask;
	size_t cpu_mask_size = sizeof(cpu_mask);
	/* Prefer a dummy software event so the test doesn't need a PMU. */
	struct evlist *evlist = perf_evlist__new_dummy();
	struct evsel *evsel;
	struct perf_sample sample;
	const char *cmd = "sleep";
	const char *argv[] = { cmd, "1", NULL, };
	char *bname, *mmap_filename;
	u64 prev_time = 0;
	/* Flags for the mmap events we expect to see from the workload. */
	bool found_cmd_mmap = false,
	     found_coreutils_mmap = false,
	     found_libc_mmap = false,
	     found_vdso_mmap = false,
	     found_ld_mmap = false;
	int err = -1, errs = 0, i, wakeups = 0;
	u32 cpu;
	/* Per-type event counters, plus a grand total across all mmaps. */
	int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };
	char sbuf[STRERR_BUFSIZE];

	if (evlist == NULL) /* Fallback for kernels lacking PERF_COUNT_SW_DUMMY */
		evlist = perf_evlist__new_default();

	if (evlist == NULL) {
		pr_debug("Not enough memory to create evlist\n");
		goto out;
	}

	/*
	 * Create maps of threads and cpus to monitor. In this case
	 * we start with all threads and cpus (-1, -1) but then in
	 * perf_evlist__prepare_workload we'll fill in the only thread
	 * we're monitoring, the one forked there.
	 */
	err = perf_evlist__create_maps(evlist, &opts.target);
	if (err < 0) {
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_delete_evlist;
	}

	/*
	 * Prepare the workload in argv[] to run, it'll fork it, and then wait
	 * for perf_evlist__start_workload() to exec it. This is done this way
	 * so that we have time to open the evlist (calling sys_perf_event_open
	 * on all the fds) and then mmap them.
	 */
	err = perf_evlist__prepare_workload(evlist, &opts.target, argv, false, NULL);
	if (err < 0) {
		pr_debug("Couldn't run the workload!\n");
		goto out_delete_evlist;
	}

	/*
	 * Config the evsels, setting attr->comm on the first one, etc.
	 */
	evsel = perf_evlist__first(evlist);
	/* CPU/TID/TIME are needed for the per-sample checks below. */
	perf_evsel__set_sample_bit(evsel, CPU);
	perf_evsel__set_sample_bit(evsel, TID);
	perf_evsel__set_sample_bit(evsel, TIME);
	perf_evlist__config(evlist, &opts, NULL);

	err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);
	if (err < 0) {
		pr_debug("sched__get_first_possible_cpu: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	cpu = err;

	/*
	 * So that we can check perf_sample.cpu on all the samples.
	 */
	if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) {
		pr_debug("sched_setaffinity: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	/*
	 * Call sys_perf_event_open on all the fds on all the evsels,
	 * grouping them if asked to.
	 */
	/* NOTE(review): error message below still says "perf_evlist__open". */
	err = evlist__open(evlist);
	if (err < 0) {
		pr_debug("perf_evlist__open: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	/*
	 * mmap the first fd on a given CPU and ask for events for the other
	 * fds in the same CPU to be injected in the same mmap ring buffer
	 * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
	 */
	err = evlist__mmap(evlist, opts.mmap_pages);
	if (err < 0) {
		pr_debug("evlist__mmap: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	/*
	 * Now that all is properly set up, enable the events, they will
	 * count just on workload.pid, which will start...
	 */
	evlist__enable(evlist);

	/*
	 * Now!
	 */
	perf_evlist__start_workload(evlist);

	/* Drain the ring buffers until PERF_RECORD_EXIT or we give up. */
	while (1) {
		int before = total_events;

		for (i = 0; i < evlist->nr_mmaps; i++) {
			union perf_event *event;
			struct mmap *md;

			md = &evlist->mmap[i];
			if (perf_mmap__read_init(md) < 0)
				continue;	/* no data in this ring */

			while ((event = perf_mmap__read_event(md)) != NULL) {
				const u32 type = event->header.type;
				const char *name = perf_event__name(type);

				++total_events;
				if (type < PERF_RECORD_MAX)
					nr_events[type]++;

				err = perf_evlist__parse_sample(evlist, event, &sample);
				if (err < 0) {
					if (verbose > 0)
						perf_event__fprintf(event, stderr);
					pr_debug("Couldn't parse sample\n");
					goto out_delete_evlist;
				}

				if (verbose > 0) {
					pr_info("%" PRIu64" %d ", sample.time, sample.cpu);
					perf_event__fprintf(event, stderr);
				}

				/* Timestamps must be monotonically non-decreasing. */
				if (prev_time > sample.time) {
					pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n",
						 name, prev_time, sample.time);
					++errs;
				}

				prev_time = sample.time;

				/* Workload was pinned, so every event must be on that CPU. */
				if (sample.cpu != cpu) {
					pr_debug("%s with unexpected cpu, expected %d, got %d\n",
						 name, cpu, sample.cpu);
					++errs;
				}

				if ((pid_t)sample.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.pid);
					++errs;
				}

				if ((pid_t)sample.tid != evlist->workload.pid) {
					pr_debug("%s with unexpected tid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.tid);
					++errs;
				}

				/*
				 * Task-lifetime events carry pid/tid in the record body
				 * (comm/mmap/fork/exit share the same leading layout).
				 */
				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP ||
				     type == PERF_RECORD_MMAP2 ||
				     type == PERF_RECORD_FORK ||
				     type == PERF_RECORD_EXIT) &&
				     (pid_t)event->comm.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid/tid\n", name);
					++errs;
				}

				/* Single-threaded workload: pid and tid must agree. */
				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP ||
				     type == PERF_RECORD_MMAP2) &&
				     event->comm.pid != event->comm.tid) {
					pr_debug("%s with different pid/tid!\n", name);
					++errs;
				}

				switch (type) {
				case PERF_RECORD_COMM:
					if (strcmp(event->comm.comm, cmd)) {
						pr_debug("%s with unexpected comm!\n", name);
						++errs;
					}
					break;
				case PERF_RECORD_EXIT:
					goto found_exit;
				case PERF_RECORD_MMAP:
					mmap_filename = event->mmap.filename;
					goto check_bname;
				case PERF_RECORD_MMAP2:
					mmap_filename = event->mmap2.filename;
				check_bname:
					/* Match on the basename of the mapped file. */
					bname = strrchr(mmap_filename, '/');
					if (bname != NULL) {
						if (!found_cmd_mmap)
							found_cmd_mmap = !strcmp(bname + 1, cmd);
						/* Busybox-style systems exec "coreutils" instead. */
						if (!found_coreutils_mmap)
							found_coreutils_mmap = !strcmp(bname + 1, "coreutils");
						if (!found_libc_mmap)
							found_libc_mmap = !strncmp(bname + 1, "libc", 4);
						if (!found_ld_mmap)
							found_ld_mmap = !strncmp(bname + 1, "ld", 2);
					} else if (!found_vdso_mmap)
						/* [vdso] has no '/' in its pathname. */
						found_vdso_mmap = !strcmp(mmap_filename, "[vdso]");
					break;

				case PERF_RECORD_SAMPLE:
					/* Just ignore samples for now */
					break;
				default:
					pr_debug("Unexpected perf_event->header.type %d!\n",
						 type);
					++errs;
				}

				perf_mmap__consume(md);
			}
			perf_mmap__read_done(md);
		}

		/*
		 * We don't use poll here because at least at 3.1 times the
		 * PERF_RECORD_{!SAMPLE} events don't honour
		 * perf_event_attr.wakeup_events, just PERF_EVENT_SAMPLE does.
		 */
		if (total_events == before && false)
			perf_evlist__poll(evlist, -1);

		/* Poll-by-sleeping instead; bail out after ~5s without EXIT. */
		sleep(1);
		if (++wakeups > 5) {
			pr_debug("No PERF_RECORD_EXIT event!\n");
			break;
		}
	}

found_exit:
	/* One COMM for the exec, plus possibly one more for "coreutils". */
	if (nr_events[PERF_RECORD_COMM] > 1 + !!found_coreutils_mmap) {
		pr_debug("Excessive number of PERF_RECORD_COMM events!\n");
		++errs;
	}

	if (nr_events[PERF_RECORD_COMM] == 0) {
		pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd);
		++errs;
	}

	if (!found_cmd_mmap && !found_coreutils_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd);
		++errs;
	}

	if (!found_libc_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc");
		++errs;
	}

	if (!found_ld_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld");
		++errs;
	}

	if (!found_vdso_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]");
		++errs;
	}
out_delete_evlist:
	evlist__delete(evlist);
out:
	/* Fail on any hard error (err) or accumulated validation miss (errs). */
	return (err < 0 || errs > 0) ? -1 : 0;
}