/*
 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <sys/mman.h>
#include <inttypes.h>
#include <asm/bug.h>
#include "debug.h"
#include "event.h"
#include "mmap.h"
#include "util.h" /* page_size */
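
/*
 * A perf ring buffer mapping is one user/control page followed by a
 * power-of-two data area of mask + 1 bytes, hence the extra page_size in
 * the total length below.
 */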

size_t perf_mmap__mmap_len(struct perf_mmap *map)
{
	return map->mask + 1 + page_size;
}

/* When check_messup is true, 'end' must point to a good entry */
static union perf_event *perf_mmap__read(struct perf_mmap *map,
					 u64 *startp, u64 end)
{
	unsigned char *data = map->base + page_size;
	union perf_event *event = NULL;
	int diff = end - *startp;

	if (diff >= (int)sizeof(event->header)) {
		size_t size;

		event = (union perf_event *)&data[*startp & map->mask];
		size = event->header.size;

		if (size < sizeof(event->header) || diff < (int)size)
			return NULL;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
			unsigned int offset = *startp;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = map->event_copy;

			do {
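				/*
				 * Copy up to the end of the data area, then
				 * wrap around and continue from the start,
				 * building one contiguous event in event_copy.
				 */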
				cpy = min(map->mask + 1 - (offset & map->mask), len);
				memcpy(dst, &data[offset & map->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *)map->event_copy;
		}

		*startp += size;
	}

	return event;
}

/*
 * Read events from the ring buffer one by one.
 * Return one event for each call.
 *
 * Usage:
 * perf_mmap__read_init()
 * while ((event = perf_mmap__read_event()) != NULL) {
 *	//process the event
 *	perf_mmap__consume()
 * }
 * perf_mmap__read_done()
 *
 * (A concrete sketch follows the function below.)
 */
union perf_event *perf_mmap__read_event(struct perf_mmap *map,
					bool overwrite __maybe_unused,
					u64 *startp __maybe_unused,
					u64 end __maybe_unused)
{
	union perf_event *event;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return NULL;

	/* non-overwrite mode doesn't pause the ring buffer */
	if (!map->overwrite)
		map->end = perf_mmap__read_head(map);

	event = perf_mmap__read(map, &map->start, map->end);

	if (!map->overwrite)
		map->prev = map->start;

	return event;
}
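
/*
 * Illustrative sketch of the protocol above (process_event() is a
 * hypothetical callback, not part of this file):
 *
 *	u64 start, end;
 *	union perf_event *event;
 *
 *	if (perf_mmap__read_init(map, map->overwrite, &start, &end) == 0) {
 *		while ((event = perf_mmap__read_event(map, map->overwrite,
 *						      &start, end)) != NULL) {
 *			process_event(event);
 *			perf_mmap__consume(map, map->overwrite);
 *		}
 *		perf_mmap__read_done(map);
 *	}
 */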

static bool perf_mmap__empty(struct perf_mmap *map)
{
	return perf_mmap__read_head(map) == map->prev && !map->auxtrace_mmap.base;
}

void perf_mmap__get(struct perf_mmap *map)
{
	refcount_inc(&map->refcnt);
}

void perf_mmap__put(struct perf_mmap *map)
{
	BUG_ON(map->base && refcount_read(&map->refcnt) == 0);

	if (refcount_dec_and_test(&map->refcnt))
		perf_mmap__munmap(map);
}

void perf_mmap__consume(struct perf_mmap *map, bool overwrite __maybe_unused)
{
	if (!map->overwrite) {
		u64 old = map->prev;

		perf_mmap__write_tail(map, old);
	}

	if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
		perf_mmap__put(map);
}

int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_unused,
				       off_t auxtrace_offset __maybe_unused,
				       unsigned int auxtrace_pages __maybe_unused,
				       bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
					  struct perf_evlist *evlist __maybe_unused,
					  int idx __maybe_unused,
					  bool per_cpu __maybe_unused)
{
}

void perf_mmap__munmap(struct perf_mmap *map)
{
	if (map->base != NULL) {
		munmap(map->base, perf_mmap__mmap_len(map));
		map->base = NULL;
		map->fd = -1;
		refcount_set(&map->refcnt, 0);
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}

int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd)
{
	/*
	 * The last one will be done at perf_mmap__consume(), so that we
	 * make sure we don't prevent tools from consuming every last event in
	 * the ring buffer.
	 *
	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
	 * anymore, but the last events for it are still in the ring buffer,
	 * waiting to be consumed.
	 *
	 * Tools can choose to ignore this at their own discretion, but the
	 * evlist layer can't just drop it when filtering events in
	 * perf_evlist__filter_pollfd().
	 */
	refcount_set(&map->refcnt, 2);
	map->prev = 0;
	map->mask = mp->mask;
	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
			 MAP_SHARED, fd, 0);
	if (map->base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		map->base = NULL;
		return -1;
	}
	map->fd = fd;

	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->base, fd))
		return -1;

	return 0;
}
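
/*
 * Illustrative sketch (ring_pages is a hypothetical power-of-two page
 * count, not part of this file): a non-overwrite caller would fill in
 * mmap_params roughly as
 *
 *	struct mmap_params mp = {
 *		.prot = PROT_READ | PROT_WRITE,
 *		.mask = ring_pages * page_size - 1,
 *	};
 *
 * and then call perf_mmap__mmap(map, &mp, fd) with the perf event fd.
 */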

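/*
 * For the backward (overwrite) ring buffer: starting at 'head', walk the
 * event headers forward to find the [start, end) range of valid records,
 * stopping once a full buffer has been covered or a zero-sized header is
 * reached.
 */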
static int overwrite_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
{
	struct perf_event_header *pheader;
	u64 evt_head = head;
	int size = mask + 1;

	pr_debug2("overwrite_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
	pheader = (struct perf_event_header *)(buf + (head & mask));
	*start = head;
	while (true) {
		if (evt_head - head >= (unsigned int)size) {
			pr_debug("Finished reading overwrite ring buffer: rewind\n");
			if (evt_head - head > (unsigned int)size)
				evt_head -= pheader->size;
			*end = evt_head;
			return 0;
		}

		pheader = (struct perf_event_header *)(buf + (evt_head & mask));

		if (pheader->size == 0) {
			pr_debug("Finished reading overwrite ring buffer: get start\n");
			*end = evt_head;
			return 0;
		}

		evt_head += pheader->size;
		pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
	}
	WARN_ONCE(1, "Shouldn't get here\n");
	return -1;
}

/*
 * Report the start and end of the available data in the ring buffer
 */
int perf_mmap__read_init(struct perf_mmap *md, bool overwrite,
			 u64 *startp, u64 *endp)
{
	u64 head = perf_mmap__read_head(md);
	u64 old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;

	*startp = overwrite ? head : old;
	*endp = overwrite ? old : head;
	md->start = md->overwrite ? head : old;
	md->end = md->overwrite ? old : head;

	if (md->start == md->end)
		return -EAGAIN;

	size = md->end - md->start;
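	/*
	 * More data than the ring can hold: the non-overwrite case has lost
	 * records, the overwrite case has a full buffer and a consistent
	 * [start, end) range is recovered below.
	 */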
	if (size > (unsigned long)(md->mask) + 1) {
		if (!md->overwrite) {
			WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");

			md->prev = head;
			perf_mmap__consume(md, overwrite);
			return -EAGAIN;
		}

		/*
		 * Backward ring buffer is full. We still have a chance to read
		 * most of data from it.
		 */
		if (overwrite_rb_find_range(data, md->mask, head, &md->start, &md->end))
			return -EINVAL;
		*startp = md->start;
		*endp = md->end;
	}

	return 0;
}

int perf_mmap__push(struct perf_mmap *md, void *to,
		    int push(void *to, void *buf, size_t size))
{
	u64 head = perf_mmap__read_head(md);
	u64 end, start;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	rc = perf_mmap__read_init(md, md->overwrite, &start, &end);
	if (rc < 0)
		return (rc == -EAGAIN) ? 0 : -1;

	size = md->end - md->start;

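	/* Data wraps past the end of the buffer: push the tail chunk first. */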
	if ((md->start & md->mask) + size != (md->end & md->mask)) {
		buf = &data[md->start & md->mask];
		size = md->mask + 1 - (md->start & md->mask);
		md->start += size;

		if (push(to, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

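	/* Push the remaining, linear chunk up to 'end'. */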
	buf = &data[md->start & md->mask];
	size = md->end - md->start;
	md->start += size;

	if (push(to, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = head;
	perf_mmap__consume(md, md->overwrite);
out:
	return rc;
}
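
/*
 * Illustrative sketch of a push() callback for perf_mmap__push() (the
 * file-descriptor destination is hypothetical, not part of this file):
 *
 *	static int push_to_fd(void *to, void *buf, size_t size)
 *	{
 *		int *fd = to;
 *
 *		return write(*fd, buf, size) == (ssize_t)size ? 0 : -1;
 *	}
 *
 * called as perf_mmap__push(md, &fd, push_to_fd).
 */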

/*
 * Mandatory for overwrite mode.
 * The direction of overwrite mode is backward.
 * The last perf_mmap__read() will set the tail to map->prev;
 * map->prev needs to be corrected to the head, which is the end of the
 * next read.
 */
void perf_mmap__read_done(struct perf_mmap *map)
{
	map->prev = perf_mmap__read_head(map);
}