/*
 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <sys/mman.h>
#include <inttypes.h>
#include <asm/bug.h>
#include "debug.h"
#include "event.h"
#include "mmap.h"
#include "util.h" /* page_size */

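/*
 * The mapping is one control page followed by the data area: the kernel
 * puts the 'struct perf_event_mmap_page' header in the first page, and
 * 'mask + 1' is the size of the data pages that follow it.
 */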
size_t perf_mmap__mmap_len(struct perf_mmap *map)
{
	return map->mask + 1 + page_size;
}

/* When check_messup is true, 'end' must point to a valid entry */
static union perf_event *perf_mmap__read(struct perf_mmap *map,
					 u64 *startp, u64 end)
{
	unsigned char *data = map->base + page_size;
	union perf_event *event = NULL;
	int diff = end - *startp;

	if (diff >= (int)sizeof(event->header)) {
		size_t size;

		event = (union perf_event *)&data[*startp & map->mask];
		size = event->header.size;

		if (size < sizeof(event->header) || diff < (int)size)
			return NULL;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
			unsigned int offset = *startp;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = map->event_copy;

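			/*
			 * Reassemble the record contiguously in 'event_copy':
			 * first the bytes up to the end of the data area,
			 * then the remainder that wrapped to the front of it.
			 */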
			do {
				cpy = min(map->mask + 1 - (offset & map->mask), len);
				memcpy(dst, &data[offset & map->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *)map->event_copy;
		}

		*startp += size;
	}

	return event;
}

/*
 * Read events from the ring buffer one by one.
 * Each call returns one event.
 *
 * Usage:
 * perf_mmap__read_init(map, overwrite, &start, &end);
 * while ((event = perf_mmap__read_event(map, overwrite, &start, end)) != NULL) {
 *	// process the event
 *	perf_mmap__consume(map, overwrite);
 * }
 * perf_mmap__read_done(map);
 */
union perf_event *perf_mmap__read_event(struct perf_mmap *map,
					bool overwrite,
					u64 *startp, u64 end)
{
	union perf_event *event;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return NULL;

	if (startp == NULL)
		return NULL;

	/* non-overwrite mode doesn't pause the ring buffer */
	if (!overwrite)
		end = perf_mmap__read_head(map);

	event = perf_mmap__read(map, startp, end);

	if (!overwrite)
		map->prev = *startp;

	return event;
}

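/*
 * Empty when the reader has caught up with the head and there is no
 * AUX area mmap attached.
 */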
static bool perf_mmap__empty(struct perf_mmap *map)
{
	return perf_mmap__read_head(map) == map->prev && !map->auxtrace_mmap.base;
}

void perf_mmap__get(struct perf_mmap *map)
{
	refcount_inc(&map->refcnt);
}

void perf_mmap__put(struct perf_mmap *map)
{
	BUG_ON(map->base && refcount_read(&map->refcnt) == 0);

	if (refcount_dec_and_test(&map->refcnt))
		perf_mmap__munmap(map);
}

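/*
 * Moving the tail tells the kernel the consumed space may be overwritten;
 * once the buffer is drained, drop the extra reference taken in
 * perf_mmap__mmap() (see the comment there).
 */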
void perf_mmap__consume(struct perf_mmap *map, bool overwrite)
{
	if (!overwrite) {
		u64 old = map->prev;

		perf_mmap__write_tail(map, old);
	}

	if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
		perf_mmap__put(map);
}

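/*
 * Weak stubs, used when AUX area tracing is not supported; the strong
 * implementations live in auxtrace.c.
 */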
int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_unused,
				       off_t auxtrace_offset __maybe_unused,
				       unsigned int auxtrace_pages __maybe_unused,
				       bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
					  struct perf_evlist *evlist __maybe_unused,
					  int idx __maybe_unused,
					  bool per_cpu __maybe_unused)
{
}

void perf_mmap__munmap(struct perf_mmap *map)
{
	if (map->base != NULL) {
		munmap(map->base, perf_mmap__mmap_len(map));
		map->base = NULL;
		map->fd = -1;
		refcount_set(&map->refcnt, 0);
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}

int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd)
{
	/*
	 * The last one will be done at perf_mmap__consume(), so that we
	 * make sure we don't prevent tools from consuming every last event in
	 * the ring buffer.
	 *
	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
	 * anymore, but the last events for it are still in the ring buffer,
	 * waiting to be consumed.
	 *
	 * Tools can choose to ignore this at their own discretion, but the
	 * evlist layer can't just drop it when filtering events in
	 * perf_evlist__filter_pollfd().
	 */
	refcount_set(&map->refcnt, 2);
	map->prev = 0;
	map->mask = mp->mask;
	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
			 MAP_SHARED, fd, 0);
	if (map->base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		map->base = NULL;
		return -1;
	}
	map->fd = fd;

	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->base, fd))
		return -1;

	return 0;
}

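/*
 * Walk the records of a full backward ring buffer, starting at 'head', to
 * find the valid [start, end) window that a reader can safely consume.
 */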
static int overwrite_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
{
	struct perf_event_header *pheader;
	u64 evt_head = head;
	int size = mask + 1;

	pr_debug2("overwrite_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
	pheader = (struct perf_event_header *)(buf + (head & mask));
	*start = head;
	while (true) {
		if (evt_head - head >= (unsigned int)size) {
			pr_debug("Finished reading overwrite ring buffer: rewind\n");
			if (evt_head - head > (unsigned int)size)
				evt_head -= pheader->size;
			*end = evt_head;
			return 0;
		}

		pheader = (struct perf_event_header *)(buf + (evt_head & mask));

		if (pheader->size == 0) {
			pr_debug("Finished reading overwrite ring buffer: get start\n");
			*end = evt_head;
			return 0;
		}

		evt_head += pheader->size;
		pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
	}
	WARN_ONCE(1, "Shouldn't get here\n");
	return -1;
}

/*
 * Report the start and end of the available data in the ring buffer.
 */
int perf_mmap__read_init(struct perf_mmap *md, bool overwrite,
			 u64 *startp, u64 *endp)
{
	u64 head = perf_mmap__read_head(md);
	u64 old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;

	*startp = overwrite ? head : old;
	*endp = overwrite ? old : head;
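	/*
	 * Mirror the window in the map's own state as well: callers such as
	 * perf_mmap__push() read it back from md->start/md->end.
	 */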
	md->start = md->overwrite ? head : old;
	md->end = md->overwrite ? old : head;

	if (md->start == md->end)
		return -EAGAIN;

	size = md->end - md->start;
	if (size > (unsigned long)(md->mask) + 1) {
		if (!md->overwrite) {
			WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");

			md->prev = head;
			perf_mmap__consume(md, overwrite);
			return -EAGAIN;
		}

		/*
		 * Backward ring buffer is full. We still have a chance to read
		 * most of the data from it.
		 */
		if (overwrite_rb_find_range(data, md->mask, head, &md->start, &md->end))
			return -EINVAL;
		*startp = md->start;
		*endp = md->end;
	}

	return 0;
}

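/*
 * Drain all available data to 'to' via the 'push' callback, splitting the
 * read into two chunks only when the window wraps past the end of the
 * data area.
 */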
int perf_mmap__push(struct perf_mmap *md, void *to,
		    int push(void *to, void *buf, size_t size))
{
	u64 head = perf_mmap__read_head(md);
	u64 end, start;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	rc = perf_mmap__read_init(md, md->overwrite, &start, &end);
	if (rc < 0)
		return (rc == -EAGAIN) ? 0 : -1;

	size = md->end - md->start;

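	/* The window wraps past the end of the buffer: push the tail chunk first */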
	if ((md->start & md->mask) + size != (md->end & md->mask)) {
		buf = &data[md->start & md->mask];
		size = md->mask + 1 - (md->start & md->mask);
		md->start += size;

		if (push(to, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

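	/* Push the remaining (or only) contiguous chunk */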
	buf = &data[md->start & md->mask];
	size = md->end - md->start;
	md->start += size;

	if (push(to, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = head;
	perf_mmap__consume(md, md->overwrite);
out:
	return rc;
}

/*
 * Mandatory for overwrite mode.
 * Overwrite mode reads the buffer backwards, and the last perf_mmap__read()
 * sets the tail to map->prev. Correct map->prev to head, which is the end of
 * the next read.
 */
void perf_mmap__read_done(struct perf_mmap *map)
{
	map->prev = perf_mmap__read_head(map);
}