ordered-events.c
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include "ordered-events.h"
#include "evlist.h"
#include "session.h"
#include "asm/bug.h"
#include "debug.h"

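/* Debug printouts, gated on the ordered-events debug level. */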
#define pr_N(n, fmt, ...) \
	eprintf(n, debug_ordered_events, fmt, ##__VA_ARGS__)

#define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)

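/*
 * Insert @new into the time ordered list of queued events.  The search
 * starts from the most recently queued event (oe->last), since consecutive
 * events tend to have nearby timestamps, and walks forward or backward to
 * the insertion point.
 */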
static void queue_event(struct ordered_events *oe, struct ordered_event *new)
{
	struct ordered_event *last = oe->last;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++oe->nr_events;
	oe->last = new;

	pr_oe_time2(timestamp, "queue_event nr_events %u\n", oe->nr_events);

	if (!last) {
		list_add(&new->list, &oe->events);
		oe->max_timestamp = timestamp;
		return;
	}

	/*
	 * last event might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (last->timestamp <= timestamp) {
		while (last->timestamp <= timestamp) {
			p = last->list.next;
			if (p == &oe->events) {
				list_add_tail(&new->list, &oe->events);
				oe->max_timestamp = timestamp;
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add_tail(&new->list, &last->list);
	} else {
		while (last->timestamp > timestamp) {
			p = last->list.prev;
			if (p == &oe->events) {
				list_add(&new->list, &oe->events);
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add(&new->list, &last->list);
	}
}

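/*
 * With copy_on_queue set, queued events are duplicated so they stay valid
 * independently of the buffer they were read from; cur_alloc_size tracks
 * those copies against max_alloc_size.  Without it, the original pointer is
 * queued and free_dup_event() is a no-op.
 */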
static union perf_event *__dup_event(struct ordered_events *oe,
				     union perf_event *event)
{
	union perf_event *new_event = NULL;

	if (oe->cur_alloc_size < oe->max_alloc_size) {
		new_event = memdup(event, event->header.size);
		if (new_event)
			oe->cur_alloc_size += event->header.size;
	}

	return new_event;
}

static union perf_event *dup_event(struct ordered_events *oe,
				   union perf_event *event)
{
	return oe->copy_on_queue ? __dup_event(oe, event) : event;
}

static void free_dup_event(struct ordered_events *oe, union perf_event *event)
{
	/* NULL is passed for the unused first entry of each buffer. */
	if (event && oe->copy_on_queue) {
		oe->cur_alloc_size -= event->header.size;
		free(event);
	}
}

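/* Number of ordered_event slots per allocated buffer (roughly 64KB each). */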
#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct ordered_event))
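/*
 * Get a free ordered_event slot: reuse one from the cache of deleted events,
 * else take the next slot of the current buffer, else allocate a fresh
 * MAX_SAMPLE_BUFFER-sized buffer (linked on to_free through its first,
 * otherwise unused, entry) while under max_alloc_size.
 */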
static struct ordered_event *alloc_event(struct ordered_events *oe,
					 union perf_event *event)
{
	struct list_head *cache = &oe->cache;
	struct ordered_event *new = NULL;
	union perf_event *new_event;

	new_event = dup_event(oe, event);
	if (!new_event)
		return NULL;

	if (!list_empty(cache)) {
		new = list_entry(cache->next, struct ordered_event, list);
		list_del(&new->list);
	} else if (oe->buffer) {
		new = oe->buffer + oe->buffer_idx;
		if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
			oe->buffer = NULL;
	} else if (oe->cur_alloc_size < oe->max_alloc_size) {
		size_t size = MAX_SAMPLE_BUFFER * sizeof(*new);

		oe->buffer = malloc(size);
		if (!oe->buffer) {
			free_dup_event(oe, new_event);
			return NULL;
		}

		pr("alloc size %" PRIu64 "B (+%zu), max %" PRIu64 "B\n",
		   oe->cur_alloc_size, size, oe->max_alloc_size);

		oe->cur_alloc_size += size;
		list_add(&oe->buffer->list, &oe->to_free);

		/*
		 * The first entry is abused to maintain the to_free list;
		 * clear its event pointer so ordered_events__free() never
		 * hands uninitialized data to free_dup_event().
		 */
		oe->buffer->event = NULL;
		oe->buffer_idx = 2;
		new = oe->buffer + 1;
	} else {
		/* Allocation limit reached: free any duplicated event and fail. */
		pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size);
		free_dup_event(oe, new_event);
		return NULL;
	}

	new->event = new_event;
	return new;
}

struct ordered_event *
ordered_events__new(struct ordered_events *oe, u64 timestamp,
		    union perf_event *event)
{
	struct ordered_event *new;

	new = alloc_event(oe, event);
	if (new) {
		new->timestamp = timestamp;
		queue_event(oe, new);
	}

	return new;
}

void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
{
	list_move(&event->list, &oe->cache);
	oe->nr_events--;
	free_dup_event(oe, event->event);
}

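/*
 * Deliver all queued events with a timestamp up to oe->next_flush, in time
 * order, recycling their slots onto the cache list as they go.
 */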
static int __ordered_events__flush(struct ordered_events *oe)
{
	struct list_head *head = &oe->events;
	struct ordered_event *tmp, *iter;
	struct perf_sample sample;
	u64 limit = oe->next_flush;
	u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
	bool show_progress = limit == ULLONG_MAX;
	struct ui_progress prog;
	int ret;

	if (!limit)
		return 0;

	if (show_progress)
		ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (session_done())
			return 0;

		if (iter->timestamp > limit)
			break;

		ret = perf_evlist__parse_sample(oe->evlist, iter->event, &sample);
		if (ret) {
			pr_err("Can't parse sample, err = %d\n", ret);
		} else {
			ret = machines__deliver_event(oe->machines, oe->evlist, iter->event,
						      &sample, oe->tool, iter->file_offset);
			if (ret)
				return ret;
		}

		ordered_events__delete(oe, iter);
		oe->last_flush = iter->timestamp;

		if (show_progress)
			ui_progress__update(&prog, 1);
	}

	if (list_empty(head))
		oe->last = NULL;
	else if (last_ts <= limit)
		oe->last = list_entry(head->prev, struct ordered_event, list);

	return 0;
}

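/*
 * Pick a flush point according to @how and flush up to it: FINAL flushes
 * everything, HALF flushes up to halfway between the oldest and the most
 * recently queued timestamps, ROUND reuses the point recorded by the
 * previous round and then advances it to the current max_timestamp, and
 * NONE leaves it untouched.
 */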
int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
{
	static const char * const str[] = {
		"NONE",
		"FINAL",
		"ROUND",
		"HALF ",
	};
	int err;

	if (oe->nr_events == 0)
		return 0;

	switch (how) {
	case OE_FLUSH__FINAL:
		oe->next_flush = ULLONG_MAX;
		break;

	case OE_FLUSH__HALF:
	{
		struct ordered_event *first, *last;
		struct list_head *head = &oe->events;

		first = list_entry(head->next, struct ordered_event, list);
		last = oe->last;

		/* Warn if we are called before any event got allocated. */
		if (WARN_ONCE(!last || list_empty(head), "empty queue"))
			return 0;

		oe->next_flush  = first->timestamp;
		oe->next_flush += (last->timestamp - first->timestamp) / 2;
		break;
	}

	case OE_FLUSH__ROUND:
	case OE_FLUSH__NONE:
	default:
		break;
	}

	pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush PRE  %s, nr_events %u\n",
		   str[how], oe->nr_events);
	pr_oe_time(oe->max_timestamp, "max_timestamp\n");

	err = __ordered_events__flush(oe);

	if (!err) {
		if (how == OE_FLUSH__ROUND)
			oe->next_flush = oe->max_timestamp;

		oe->last_flush_type = how;
	}

	pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush POST %s, nr_events %u\n",
		   str[how], oe->nr_events);
	pr_oe_time(oe->last_flush, "last_flush\n");

	return err;
}

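/*
 * Start with an empty queue: no events, nothing cached or pending free, and
 * an effectively unlimited allocation cap until a caller lowers it.
 */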
void ordered_events__init(struct ordered_events *oe, struct machines *machines,
			  struct perf_evlist *evlist, struct perf_tool *tool)
{
	INIT_LIST_HEAD(&oe->events);
	INIT_LIST_HEAD(&oe->cache);
	INIT_LIST_HEAD(&oe->to_free);
	oe->max_alloc_size = (u64) -1;
	oe->cur_alloc_size = 0;
	oe->evlist	   = evlist;
	oe->machines	   = machines;
	oe->tool	   = tool;
}

void ordered_events__free(struct ordered_events *oe)
{
	while (!list_empty(&oe->to_free)) {
		struct ordered_event *event;

		/*
		 * Entries on to_free are the first slot of each allocated
		 * buffer; that slot never carries a duplicated event (its
		 * pointer is cleared in alloc_event()), so only the buffer
		 * itself is actually released here.
		 */
		event = list_entry(oe->to_free.next, struct ordered_event, list);
		list_del(&event->list);
		free_dup_event(oe, event->event);
		free(event);
	}
}