/*
 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <sys/mman.h>
#include <inttypes.h>
#include <asm/bug.h>
#include "debug.h"
#include "event.h"
#include "mmap.h"
#include "util.h" /* page_size */

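/*
 * The mapping consists of one control page (struct perf_event_mmap_page)
 * followed by the data area, whose size is a power of two (mask + 1 bytes).
 */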
size_t perf_mmap__mmap_len(struct perf_mmap *map)
{
	return map->mask + 1 + page_size;
}

/* 'end' must point to a valid entry */
static union perf_event *perf_mmap__read(struct perf_mmap *map,
					 u64 start, u64 end, u64 *prev)
{
	unsigned char *data = map->base + page_size;
	union perf_event *event = NULL;
	int diff = end - start;

	if (diff >= (int)sizeof(event->header)) {
		size_t size;

		event = (union perf_event *)&data[start & map->mask];
		size = event->header.size;

		if (size < sizeof(event->header) || diff < (int)size) {
			event = NULL;
			goto broken_event;
		}

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((start & map->mask) + size != ((start + size) & map->mask)) {
			unsigned int offset = start;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = map->event_copy;

			do {
				cpy = min(map->mask + 1 - (offset & map->mask), len);
				memcpy(dst, &data[offset & map->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *)map->event_copy;
		}

		start += size;
	}

broken_event:
	if (prev)
		*prev = start;

	return event;
}

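/*
 * A minimal sketch of a consumer loop over a forward (non-overwrite) ring
 * buffer, assuming 'map' was set up with perf_mmap__mmap():
 *
 *	union perf_event *event;
 *
 *	while ((event = perf_mmap__read_forward(map)) != NULL) {
 *		... process the event ...
 *		perf_mmap__consume(map, false);
 *	}
 */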
union perf_event *perf_mmap__read_forward(struct perf_mmap *map)
{
	u64 head;
	u64 old = map->prev;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return NULL;

	head = perf_mmap__read_head(map);

	return perf_mmap__read(map, old, head, &map->prev);
}

union perf_event *perf_mmap__read_backward(struct perf_mmap *map)
{
	u64 head, end;
	u64 start = map->prev;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return NULL;

	head = perf_mmap__read_head(map);
	if (!head)
		return NULL;

	/*
	 * The 'head' pointer starts at 0 and the kernel subtracts the record
	 * size from it on every write, so 'head' is effectively negative.
	 * The 'end' pointer is derived by adding the ring buffer size to
	 * 'head', which means the valid data to read spans the whole ring
	 * buffer.  If 'end' is positive, the ring buffer has not been
	 * completely filled yet, so 'end' must be adjusted to 0.
	 *
	 * However, since both 'head' and 'end' are unsigned, we can't simply
	 * compare 'end' against 0.  Instead, compare '-head' with the size
	 * of the ring buffer, where '-head' is the number of bytes the
	 * kernel has written to it.
	 */
	if (-head < (u64)(map->mask + 1))
		end = 0;
	else
		end = head + map->mask + 1;

	return perf_mmap__read(map, start, end, &map->prev);
}

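/*
 * Bring map->prev up to date with the current write head so that the next
 * read starts from the most recently written data.
 */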
void perf_mmap__read_catchup(struct perf_mmap *map)
{
	u64 head;

	if (!refcount_read(&map->refcnt))
		return;

	head = perf_mmap__read_head(map);
	map->prev = head;
}

static bool perf_mmap__empty(struct perf_mmap *map)
{
	return perf_mmap__read_head(map) == map->prev && !map->auxtrace_mmap.base;
}

void perf_mmap__get(struct perf_mmap *map)
{
	refcount_inc(&map->refcnt);
}

void perf_mmap__put(struct perf_mmap *map)
{
	BUG_ON(map->base && refcount_read(&map->refcnt) == 0);

	if (refcount_dec_and_test(&map->refcnt))
		perf_mmap__munmap(map);
}

void perf_mmap__consume(struct perf_mmap *map, bool overwrite)
{
	if (!overwrite) {
		u64 old = map->prev;

		perf_mmap__write_tail(map, old);
	}

	if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
		perf_mmap__put(map);
}

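/*
 * Weak no-op defaults for the AUX area (auxtrace) hooks; builds with
 * auxtrace support provide the real implementations elsewhere.
 */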
int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_unused,
				       off_t auxtrace_offset __maybe_unused,
				       unsigned int auxtrace_pages __maybe_unused,
				       bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
					  struct perf_evlist *evlist __maybe_unused,
					  int idx __maybe_unused,
					  bool per_cpu __maybe_unused)
{
}

void perf_mmap__munmap(struct perf_mmap *map)
{
	if (map->base != NULL) {
		munmap(map->base, perf_mmap__mmap_len(map));
		map->base = NULL;
		map->fd = -1;
		refcount_set(&map->refcnt, 0);
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}

int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd)
{
	/*
	 * The last one will be done at perf_evlist__mmap_consume(), so that we
	 * make sure we don't prevent tools from consuming every last event in
	 * the ring buffer.
	 *
	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
	 * anymore, but the last events for it are still in the ring buffer,
	 * waiting to be consumed.
	 *
	 * Tools can choose to ignore this at their own discretion, but the
	 * evlist layer can't just drop it when filtering events in
	 * perf_evlist__filter_pollfd().
	 */
	refcount_set(&map->refcnt, 2);
	map->prev = 0;
	map->mask = mp->mask;
	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
			 MAP_SHARED, fd, 0);
	if (map->base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		map->base = NULL;
		return -1;
	}
	map->fd = fd;

	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->base, fd))
		return -1;

	return 0;
}

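/*
 * Scan the records of an overwritable (backward) ring buffer starting at
 * 'head' to determine the [*start, *end) range of valid data: stop once a
 * full buffer's worth has been covered (rewinding to the last complete
 * record) or when a zero-sized header marks the end of the written data.
 */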
static int backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
{
	struct perf_event_header *pheader;
	u64 evt_head = head;
	int size = mask + 1;

	pr_debug2("backward_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
	pheader = (struct perf_event_header *)(buf + (head & mask));
	*start = head;
	while (true) {
		if (evt_head - head >= (unsigned int)size) {
			pr_debug("Finished reading backward ring buffer: rewind\n");
			if (evt_head - head > (unsigned int)size)
				evt_head -= pheader->size;
			*end = evt_head;
			return 0;
		}

		pheader = (struct perf_event_header *)(buf + (evt_head & mask));

		if (pheader->size == 0) {
			pr_debug("Finished reading backward ring buffer: get start\n");
			*end = evt_head;
			return 0;
		}

		evt_head += pheader->size;
		pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
	}
	WARN_ONCE(1, "Shouldn't get here\n");
	return -1;
}

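/*
 * Drain the data between md->prev and the current head (or between head and
 * md->prev for a backward ring buffer) by handing it to the push() callback,
 * splitting it into two chunks when it wraps around the end of the buffer.
 */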
int perf_mmap__push(struct perf_mmap *md, bool backward,
		    void *to, int push(void *to, void *buf, size_t size))
{
	u64 head = perf_mmap__read_head(md);
	u64 old = md->prev;
	u64 end = head, start = old;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	start = backward ? head : old;
	end = backward ? old : head;

	if (start == end)
		return 0;

	size = end - start;
	if (size > (unsigned long)(md->mask) + 1) {
		if (!backward) {
			WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");

			md->prev = head;
			perf_mmap__consume(md, backward);
			return 0;
		}

		/*
		 * Backward ring buffer is full. We still have a chance to read
		 * most of data from it.
		 */
		if (backward_rb_find_range(data, md->mask, head, &start, &end))
			return -1;
	}

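	/*
	 * The data wraps around the end of the buffer: push the chunk up to
	 * the end of the data area first, then fall through for the rest.
	 */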
	if ((start & md->mask) + size != (end & md->mask)) {
		buf = &data[start & md->mask];
		size = md->mask + 1 - (start & md->mask);
		start += size;

		if (push(to, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[start & md->mask];
	size = end - start;
	start += size;

	if (push(to, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = head;
	perf_mmap__consume(md, backward);
out:
	return rc;
}