#include "evsel.h"
#include "evlist.h"
#include "../perf.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"

#include <unistd.h>
#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

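/*
 * Accessors into the ncpus x nthreads xyarray backing stores: FD() is the
 * perf_event_open() fd slot, SID() the struct perf_sample_id slot for a
 * given (cpu, thread) pair.
 */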
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->id, x, y)

struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL) {
		evsel->idx	   = idx;
		evsel->attr	   = *attr;
		INIT_LIST_HEAD(&evsel->node);
	}

	return evsel;
}

int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
	return evsel->fd != NULL ? 0 : -ENOMEM;
}

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	return evsel->id != NULL ? 0 : -ENOMEM;
}

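/*
 * evsel->counts is one allocation: the perf_counts header followed by a
 * perf_counts_values slot per cpu, addressed below as counts->cpu[cpu].
 */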
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}

void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->id);
	evsel->id = NULL;
}

void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}

void perf_evlist__munmap(struct perf_evlist *evlist, int ncpus)
{
	int cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
}

int perf_evlist__alloc_mmap(struct perf_evlist *evlist, int ncpus)
{
	evlist->mmap = zalloc(ncpus * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	xyarray__delete(evsel->fd);
	xyarray__delete(evsel->id);
	free(evsel);
}

int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

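	/*
	 * When scaling, the counter is expected to have been opened with
	 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING,
	 * so the read returns { value, time_enabled, time_running }.
	 */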
	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	if (scale) {
		if (count.run == 0)
			count.val = 0;
		else if (count.run < count.ena)
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;
	return 0;
}

int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	aggr->val = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

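	/*
	 * counts->scaled: 0 = counter ran the whole time, 1 = value scaled
	 * up by ena/run (counter was multiplexed), -1 = counter never ran.
	 */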
	evsel->counts->scaled = 0;
	if (scale) {
		if (aggr->run == 0) {
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			evsel->counts->scaled = 1;
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}

static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads, bool group, bool inherit)
{
	int cpu, thread;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -1;

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		int group_fd = -1;

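		/*
		 * Inheritance only makes sense when the counter follows a
		 * task, i.e. when the cpu map entry is -1 ("any cpu"), so
		 * mask it off for per-cpu counters.
		 */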
		evsel->attr.inherit = (cpus->map[cpu] < 0) && inherit;

		for (thread = 0; thread < threads->nr; thread++) {
			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     threads->map[thread],
								     cpus->map[cpu],
								     group_fd, 0);
			if (FD(evsel, cpu, thread) < 0)
				goto out_close;

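			/*
			 * When grouping, the first counter opened on this cpu
			 * becomes the group leader for the ones that follow.
			 */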
			if (group && group_fd == -1)
				group_fd = FD(evsel, cpu, thread);
		}
	}

	return 0;

out_close:
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = threads->nr;
	} while (--cpu >= 0);
	return -1;
}

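/*
 * Single entry placeholder maps used when the caller passes NULL: a cpu of
 * -1 lets the counter follow its task on any cpu, a pid/tid of -1 asks for
 * an all-tasks counter on the given cpu.
 */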
static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr	= 1,
	.cpus	= { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};

int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads, bool group, bool inherit)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads, group, inherit);
}

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus, bool group, bool inherit)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group, inherit);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads, bool group, bool inherit)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group, inherit);
}

static int __perf_evlist__mmap(struct perf_evlist *evlist, int cpu, int prot,
			       int mask, int fd)
{
	evlist->mmap[cpu].prev = 0;
	evlist->mmap[cpu].mask = mask;
	evlist->mmap[cpu].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[cpu].base == MAP_FAILED)
		return -1;

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

static int perf_evlist__id_hash(struct perf_evlist *evlist, struct perf_evsel *evsel,
			       int cpu, int thread, int fd)
{
	struct perf_sample_id *sid;
	u64 read_data[4] = { 0, };
	int hash, id_idx = 1; /* The first entry is the counter value */

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

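	/*
	 * Remember which evsel owns this kernel assigned id so that samples
	 * read from the shared ring can be demultiplexed via the hash table.
	 */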
	sid = SID(evsel, cpu, thread);
	sid->id = read_data[id_idx];
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
	return 0;
}

/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @cpus - cpu map being monitored
 * @threads - threads map being monitored
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head)
 */
int perf_evlist__mmap(struct perf_evlist *evlist, struct cpu_map *cpus,
		      struct thread_map *threads, int pages, bool overwrite)
{
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	int mask = pages * page_size - 1, cpu;
	struct perf_evsel *first_evsel, *evsel;
	int thread, prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);

	if (evlist->mmap == NULL &&
	    perf_evlist__alloc_mmap(evlist, cpus->nr) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL &&
	    perf_evlist__alloc_pollfd(evlist, cpus->nr, threads->nr) < 0)
		return -ENOMEM;

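	/* One extra page for the perf_event_mmap_page header before the data. */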
	evlist->mmap_len = (pages + 1) * page_size;
	first_evsel = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->id == NULL &&
		    perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
			return -ENOMEM;

		for (cpu = 0; cpu < cpus->nr; cpu++) {
			for (thread = 0; thread < threads->nr; thread++) {
				int fd = FD(evsel, cpu, thread);

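				/*
				 * Only the first counter (first evsel, thread 0)
				 * on each cpu gets its own ring; every other fd
				 * redirects its output into it via
				 * PERF_EVENT_IOC_SET_OUTPUT, giving one mmap
				 * buffer per cpu.
				 */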
				if (evsel->idx || thread) {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT,
						  FD(first_evsel, cpu, 0)) != 0)
						goto out_unmap;
				} else if (__perf_evlist__mmap(evlist, cpu, prot, mask, fd) < 0)
					goto out_unmap;

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_hash(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < cpus->nr; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
	return -1;
}