// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/types.h>
#include <sys/prctl.h>
#include <perf/cpumap.h>
#include <perf/evlist.h>
#include <perf/mmap.h>

#include "debug.h"
#include "parse-events.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "record.h"
#include "tsc.h"
#include "mmap.h"
#include "tests.h"
#include "pmu.h"
#include "pmu-hybrid.h"

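/*
 * Error-checking helpers: evaluate x and, on failure (a negative return
 * value or a NULL pointer), log the failing expression and jump to the
 * common cleanup path at out_err.
 */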
#define CHECK__(x) {				\
	while ((x) < 0) {			\
		pr_debug(#x " failed!\n");	\
		goto out_err;			\
	}					\
}

#define CHECK_NOT_NULL__(x) {			\
	while ((x) == NULL) {			\
		pr_debug(#x " failed!\n");	\
		goto out_err;			\
	}					\
}

/**
 * test__perf_time_to_tsc - test converting perf time to TSC.
 *
 * This function implements a test that checks that the conversion of perf time
 * to and from TSC is consistent with the order of events.  If the test passes
 * %0 is returned, otherwise %-1 is returned.  If TSC conversion is not
 * supported then the test passes but " (not supported)" is printed.
 */
int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	struct record_opts opts = {
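		/* UINT_MAX/ULLONG_MAX act as "not set by the user" sentinels. */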
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.target		     = {
			.uses_mmap   = true,
		},
		.sample_time	     = true,
	};
	struct perf_thread_map *threads = NULL;
	struct perf_cpu_map *cpus = NULL;
	struct evlist *evlist = NULL;
	struct evsel *evsel = NULL;
	int err = -1, ret, i;
	const char *comm1, *comm2;
	struct perf_tsc_conversion tc;
	struct perf_event_mmap_page *pc;
	union perf_event *event;
	u64 test_tsc, comm1_tsc, comm2_tsc;
	u64 test_time, comm1_time = 0, comm2_time = 0;
	struct mmap *md;

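	/* Measure only the current process, across all online CPUs. */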
	threads = thread_map__new(-1, getpid(), UINT_MAX);
	CHECK_NOT_NULL__(threads);

	cpus = perf_cpu_map__new(NULL);
	CHECK_NOT_NULL__(cpus);

	evlist = evlist__new();
	CHECK_NOT_NULL__(evlist);

	perf_evlist__set_maps(&evlist->core, cpus, threads);

	CHECK__(parse_events(evlist, "cycles:u", NULL));

	evlist__config(evlist, &opts, NULL);

	evsel = evlist__first(evlist);

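	/*
	 * Capture COMM records; keep the event disabled for now and do not
	 * rely on enable_on_exec (this test never execs).
	 */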
	evsel->core.attr.comm = 1;
	evsel->core.attr.disabled = 1;
	evsel->core.attr.enable_on_exec = 0;

	/*
	 * On a hybrid system, parsing "cycles:u" creates an event per hybrid
	 * PMU (cpu_core and cpu_atom), so initialize the second evsel too.
	 */
	if (perf_pmu__has_hybrid() && perf_pmu__hybrid_mounted("cpu_atom")) {
		evsel = evsel__next(evsel);
		evsel->core.attr.comm = 1;
		evsel->core.attr.disabled = 1;
		evsel->core.attr.enable_on_exec = 0;
	}

	CHECK__(evlist__open(evlist));

	CHECK__(evlist__mmap(evlist, UINT_MAX));

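	/*
	 * The first mmap'd page is the perf_event_mmap_page, from which the
	 * kernel's TSC conversion parameters can be read.
	 */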
	pc = evlist->mmap[0].core.base;
	ret = perf_read_tsc_conversion(pc, &tc);
	if (ret) {
		if (ret == -EOPNOTSUPP) {
			fprintf(stderr, " (not supported)");
			return 0;
		}
		goto out_err;
	}

	evlist__enable(evlist);

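	/*
	 * Change the thread's comm twice, reading the TSC in between: the
	 * two COMM events' perf times must bracket the raw TSC value.
	 */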
	comm1 = "Test COMM 1";
	CHECK__(prctl(PR_SET_NAME, (unsigned long)comm1, 0, 0, 0));

	test_tsc = rdtsc();

	comm2 = "Test COMM 2";
	CHECK__(prctl(PR_SET_NAME, (unsigned long)comm2, 0, 0, 0));

	evlist__disable(evlist);

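	/* Drain every ring buffer and pick out this process's COMM events. */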
	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		md = &evlist->mmap[i];
		if (perf_mmap__read_init(&md->core) < 0)
			continue;

		while ((event = perf_mmap__read_event(&md->core)) != NULL) {
			struct perf_sample sample;

			if (event->header.type != PERF_RECORD_COMM ||
			    (pid_t)event->comm.pid != getpid() ||
			    (pid_t)event->comm.tid != getpid())
				goto next_event;

			if (strcmp(event->comm.comm, comm1) == 0) {
				CHECK__(evsel__parse_sample(evsel, event, &sample));
				comm1_time = sample.time;
			}
			if (strcmp(event->comm.comm, comm2) == 0) {
				CHECK__(evsel__parse_sample(evsel, event, &sample));
				comm2_time = sample.time;
			}
next_event:
			perf_mmap__consume(&md->core);
		}
		perf_mmap__read_done(&md->core);
	}

	if (!comm1_time || !comm2_time)
		goto out_err;

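	/*
	 * Convert both ways using the kernel-provided mult/shift/zero
	 * parameters: the rdtsc() value to perf time, and the two COMM
	 * timestamps to TSC.
	 */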
	test_time = tsc_to_perf_time(test_tsc, &tc);
	comm1_tsc = perf_time_to_tsc(comm1_time, &tc);
	comm2_tsc = perf_time_to_tsc(comm2_time, &tc);

	pr_debug("1st event perf time %"PRIu64" tsc %"PRIu64"\n",
		 comm1_time, comm1_tsc);
	pr_debug("rdtsc          time %"PRIu64" tsc %"PRIu64"\n",
		 test_time, test_tsc);
	pr_debug("2nd event perf time %"PRIu64" tsc %"PRIu64"\n",
		 comm2_time, comm2_tsc);

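	/* The ordering comm1 < rdtsc < comm2 must hold in both domains. */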
	if (test_time <= comm1_time ||
	    test_time >= comm2_time)
		goto out_err;

	if (test_tsc <= comm1_tsc ||
	    test_tsc >= comm2_tsc)
		goto out_err;

	err = 0;

out_err:
	evlist__delete(evlist);
	perf_cpu_map__put(cpus);
	perf_thread_map__put(threads);
	return err;
}

bool test__tsc_is_supported(void)
{
	/*
	 * TSC conversion is only supported by perf on x86_64/i386 and Arm64,
	 * so enable the test only for those archs.
	 */
#if defined(__x86_64__) || defined(__i386__) || defined(__aarch64__)
	return true;
#else
	return false;
#endif
}