metricgroup.c 31.7 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0-only
2 3 4 5 6 7 8
/*
 * Copyright (c) 2017, Intel Corporation.
 */

/* Manage metrics and groups of metrics from JSON files */

#include "metricgroup.h"
9
#include "debug.h"
10
#include "evlist.h"
11
#include "evsel.h"
12 13 14 15 16 17 18 19
#include "strbuf.h"
#include "pmu.h"
#include "expr.h"
#include "rblist.h"
#include <string.h>
#include <errno.h>
#include "strlist.h"
#include <assert.h>
20
#include <linux/ctype.h>
21
#include <linux/string.h>
22
#include <linux/zalloc.h>
23
#include <subcmd/parse-options.h>
24 25
#include <api/fs/fs.h>
#include "util.h"
26
#include <asm/bug.h>
27
#include "cgroup.h"
28 29

struct metric_event *metricgroup__lookup(struct rblist *metric_events,
30
					 struct evsel *evsel,
31 32 33 34 35 36
					 bool create)
{
	struct rb_node *nd;
	struct metric_event me = {
		.evsel = evsel
	};
37 38 39 40

	if (!metric_events)
		return NULL;

41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79
	nd = rblist__find(metric_events, &me);
	if (nd)
		return container_of(nd, struct metric_event, nd);
	if (create) {
		rblist__add_node(metric_events, &me);
		nd = rblist__find(metric_events, &me);
		if (nd)
			return container_of(nd, struct metric_event, nd);
	}
	return NULL;
}

/* rblist comparison callback: order metric_event nodes by evsel address. */
static int metric_event_cmp(struct rb_node *rb_node, const void *entry)
{
	struct metric_event *lhs = container_of(rb_node,
						struct metric_event,
						nd);
	const struct metric_event *rhs = entry;

	if (lhs->evsel == rhs->evsel)
		return 0;
	return (char *)lhs->evsel < (char *)rhs->evsel ? -1 : +1;
}

/*
 * rblist allocation callback: copy the stack-allocated template @entry
 * into a heap node.  The redundant re-assignment of me->evsel that
 * followed the memcpy has been dropped - memcpy already copies it.
 */
static struct rb_node *metric_event_new(struct rblist *rblist __maybe_unused,
					const void *entry)
{
	struct metric_event *me = malloc(sizeof(struct metric_event));

	if (!me)
		return NULL;
	memcpy(me, entry, sizeof(struct metric_event));
	INIT_LIST_HEAD(&me->head);
	return &me->nd;
}

80 81 82 83 84 85 86
/*
 * rblist deletion callback: free every metric_expr hanging off the node,
 * including the arrays each expression owns, then the node itself.
 * (Cleaned of line-number residue from a copy/paste.)
 */
static void metric_event_delete(struct rblist *rblist __maybe_unused,
				struct rb_node *rb_node)
{
	struct metric_event *me = container_of(rb_node, struct metric_event, nd);
	struct metric_expr *expr, *tmp;

	list_for_each_entry_safe(expr, tmp, &me->head, nd) {
		free(expr->metric_refs);
		free(expr->metric_events);
		free(expr);
	}

	free(me);
}

95 96 97 98 99
static void metricgroup__rblist_init(struct rblist *metric_events)
{
	rblist__init(metric_events);
	metric_events->node_cmp = metric_event_cmp;
	metric_events->node_new = metric_event_new;
100 101 102 103 104 105
	metric_events->node_delete = metric_event_delete;
}

/* Tear down @metric_events, invoking metric_event_delete on each node. */
void metricgroup__rblist_exit(struct rblist *metric_events)
{
	rblist__exit(metric_events);
}

108 109 110 111 112 113 114 115 116 117 118
/*
 * A node in the list of referenced metrics. metric_expr
 * is held as a convenience to avoid a search through the
 * metric list.
 */
struct metric_ref_node {
	const char *metric_name;	/* Borrowed from struct pmu_event; not owned. */
	const char *metric_expr;	/* Borrowed from struct pmu_event; not owned. */
	struct list_head list;		/* Link in struct metric::metric_refs. */
};

119
struct metric {
120
	struct list_head nd;
121
	struct expr_parse_ctx pctx;
122 123
	const char *metric_name;
	const char *metric_expr;
124
	const char *metric_unit;
125 126
	struct list_head metric_refs;
	int metric_refs_cnt;
127
	int runtime;
128
	bool has_constraint;
129 130
};

131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152
/* Upper bound on recursion IDs tracked while resolving nested metrics. */
#define RECURSION_ID_MAX 1000

struct expr_ids {
	struct expr_id	id[RECURSION_ID_MAX];	/* Fixed pool of ID slots. */
	int		cnt;			/* Number of slots in use. */
};

/* Hand out the next slot from the fixed pool, or NULL when exhausted. */
static struct expr_id *expr_ids__alloc(struct expr_ids *ids)
{
	return ids->cnt < RECURSION_ID_MAX ? &ids->id[ids->cnt++] : NULL;
}

/* Free the strdup'd id string owned by each allocated pool entry. */
static void expr_ids__exit(struct expr_ids *ids)
{
	int idx;

	for (idx = 0; idx < ids->cnt; idx++)
		free(ids->id[idx].id);
}

153 154 155 156 157 158 159 160 161 162 163 164
/*
 * Return true when an event named @event_name is already present in the
 * first @num_events entries of @metric_events.  Linear scan: metric
 * groups are small.
 */
static bool contains_event(struct evsel **metric_events, int num_events,
			const char *event_name)
{
	int idx;

	for (idx = 0; idx < num_events; idx++) {
		if (strcmp(metric_events[idx]->name, event_name) == 0)
			return true;
	}
	return false;
}

165
/**
166
 * Find a group of events in perf_evlist that correspond to those from a parsed
167 168 169
 * metric expression. Note, as find_evsel_group is called in the same order as
 * perf_evlist was constructed, metric_no_merge doesn't need to test for
 * underfilling a group.
170 171 172 173
 * @perf_evlist: a list of events something like: {metric1 leader, metric1
 * sibling, metric1 sibling}:W,duration_time,{metric2 leader, metric2 sibling,
 * metric2 sibling}:W,duration_time
 * @pctx: the parse context for the metric expression.
174 175
 * @metric_no_merge: don't attempt to share events for the metric with other
 * metrics.
176 177 178 179 180 181 182
 * @has_constraint: is there a contraint on the group of events? In which case
 * the events won't be grouped.
 * @metric_events: out argument, null terminated array of evsel's associated
 * with the metric.
 * @evlist_used: in/out argument, bitmap tracking which evlist events are used.
 * @return the first metric event or NULL on failure.
 */
183
static struct evsel *find_evsel_group(struct evlist *perf_evlist,
184
				      struct expr_parse_ctx *pctx,
185
				      bool metric_no_merge,
186
				      bool has_constraint,
187
				      struct evsel **metric_events,
188
				      unsigned long *evlist_used)
189
{
190
	struct evsel *ev, *current_leader = NULL;
191
	struct expr_id_data *val_ptr;
192 193 194
	int i = 0, matched_events = 0, events_to_match;
	const int idnum = (int)hashmap__size(&pctx->ids);

195 196 197 198 199
	/*
	 * duration_time is always grouped separately, when events are grouped
	 * (ie has_constraint is false) then ignore it in the matching loop and
	 * add it to metric_events at the end.
	 */
200 201 202 203 204
	if (!has_constraint &&
	    hashmap__find(&pctx->ids, "duration_time", (void **)&val_ptr))
		events_to_match = idnum - 1;
	else
		events_to_match = idnum;
205 206

	evlist__for_each_entry (perf_evlist, ev) {
207 208 209 210 211
		/*
		 * Events with a constraint aren't grouped and match the first
		 * events available.
		 */
		if (has_constraint && ev->weak_group)
212
			continue;
213 214 215
		/* Ignore event if already used and merging is disabled. */
		if (metric_no_merge && test_bit(ev->idx, evlist_used))
			continue;
216 217 218 219 220 221
		if (!has_constraint && ev->leader != current_leader) {
			/*
			 * Start of a new group, discard the whole match and
			 * start again.
			 */
			matched_events = 0;
222 223
			memset(metric_events, 0,
				sizeof(struct evsel *) * idnum);
224 225
			current_leader = ev->leader;
		}
226 227 228 229 230 231 232 233 234 235 236 237
		/*
		 * Check for duplicate events with the same name. For example,
		 * uncore_imc/cas_count_read/ will turn into 6 events per socket
		 * on skylakex. Only the first such event is placed in
		 * metric_events. If events aren't grouped then this also
		 * ensures that the same event in different sibling groups
		 * aren't both added to metric_events.
		 */
		if (contains_event(metric_events, matched_events, ev->name))
			continue;
		/* Does this event belong to the parse context? */
		if (hashmap__find(&pctx->ids, ev->name, (void **)&val_ptr))
238
			metric_events[matched_events++] = ev;
239

240 241 242 243 244 245 246 247 248 249 250
		if (matched_events == events_to_match)
			break;
	}

	if (events_to_match != idnum) {
		/* Add the first duration_time. */
		evlist__for_each_entry(perf_evlist, ev) {
			if (!strcmp(ev->name, "duration_time")) {
				metric_events[matched_events++] = ev;
				break;
			}
251 252
		}
	}
253

254
	if (matched_events != idnum) {
255
		/* Not a whole match */
256 257 258 259 260 261
		return NULL;
	}

	metric_events[idnum] = NULL;

	for (i = 0; i < idnum; i++) {
262
		ev = metric_events[i];
263
		/* Don't free the used events. */
264
		set_bit(ev->idx, evlist_used);
265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281
		/*
		 * The metric leader points to the identically named event in
		 * metric_events.
		 */
		ev->metric_leader = ev;
		/*
		 * Mark two events with identical names in the same group (or
		 * globally) as being in use as uncore events may be duplicated
		 * for each pmu. Set the metric leader of such events to be the
		 * event that appears in metric_events.
		 */
		evlist__for_each_entry_continue(perf_evlist, ev) {
			/*
			 * If events are grouped then the search can terminate
			 * when then group is left.
			 */
			if (!has_constraint &&
282 283 284
			    ev->leader != metric_events[i]->leader &&
			    !strcmp(ev->leader->pmu_name,
				    metric_events[i]->leader->pmu_name))
285 286 287 288 289 290
				break;
			if (!strcmp(metric_events[i]->name, ev->name)) {
				set_bit(ev->idx, evlist_used);
				ev->metric_leader = metric_events[i];
			}
		}
291 292 293
	}

	return metric_events[0];
294 295 296
}

static int metricgroup__setup_events(struct list_head *groups,
297
				     bool metric_no_merge,
298
				     struct evlist *perf_evlist,
299 300 301 302 303 304
				     struct rblist *metric_events_list)
{
	struct metric_event *me;
	struct metric_expr *expr;
	int i = 0;
	int ret = 0;
305
	struct metric *m;
306
	struct evsel *evsel, *tmp;
307
	unsigned long *evlist_used;
308

309 310 311
	evlist_used = bitmap_alloc(perf_evlist->core.nr_entries);
	if (!evlist_used)
		return -ENOMEM;
312

313
	list_for_each_entry (m, groups, nd) {
314
		struct evsel **metric_events;
315
		struct metric_ref *metric_refs = NULL;
316

317
		metric_events = calloc(sizeof(void *),
318
				hashmap__size(&m->pctx.ids) + 1);
319 320 321 322
		if (!metric_events) {
			ret = -ENOMEM;
			break;
		}
323
		evsel = find_evsel_group(perf_evlist, &m->pctx,
324
					 metric_no_merge,
325
					 m->has_constraint, metric_events,
326
					 evlist_used);
327 328
		if (!evsel) {
			pr_debug("Cannot resolve %s: %s\n",
329
					m->metric_name, m->metric_expr);
330
			free(metric_events);
331 332
			continue;
		}
333
		for (i = 0; metric_events[i]; i++)
334 335 336 337
			metric_events[i]->collect_stat = true;
		me = metricgroup__lookup(metric_events_list, evsel, true);
		if (!me) {
			ret = -ENOMEM;
338
			free(metric_events);
339 340 341 342 343
			break;
		}
		expr = malloc(sizeof(struct metric_expr));
		if (!expr) {
			ret = -ENOMEM;
344
			free(metric_events);
345 346
			break;
		}
347 348 349 350 351

		/*
		 * Collect and store collected nested expressions
		 * for metric processing.
		 */
352
		if (m->metric_refs_cnt) {
353 354
			struct metric_ref_node *ref;

355
			metric_refs = zalloc(sizeof(struct metric_ref) * (m->metric_refs_cnt + 1));
356 357 358
			if (!metric_refs) {
				ret = -ENOMEM;
				free(metric_events);
359
				free(expr);
360 361 362 363
				break;
			}

			i = 0;
364
			list_for_each_entry(ref, &m->metric_refs, list) {
365 366 367 368 369 370 371 372 373 374 375 376 377
				/*
				 * Intentionally passing just const char pointers,
				 * originally from 'struct pmu_event' object.
				 * We don't need to change them, so there's no
				 * need to create our own copy.
				 */
				metric_refs[i].metric_name = ref->metric_name;
				metric_refs[i].metric_expr = ref->metric_expr;
				i++;
			}
		};

		expr->metric_refs = metric_refs;
378 379 380
		expr->metric_expr = m->metric_expr;
		expr->metric_name = m->metric_name;
		expr->metric_unit = m->metric_unit;
381
		expr->metric_events = metric_events;
382
		expr->runtime = m->runtime;
383 384
		list_add(&expr->nd, &me->head);
	}
385

386 387 388 389 390 391
	evlist__for_each_entry_safe(perf_evlist, tmp, evsel) {
		if (!test_bit(evsel->idx, evlist_used)) {
			evlist__remove(perf_evlist, evsel);
			evsel__delete(evsel);
		}
	}
392
	bitmap_free(evlist_used);
393

394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417
	return ret;
}

/*
 * Test whether the metric/group name @n matches the user selector @list.
 * Matching is case-insensitive; @n may hold several ';'- or ' '-separated
 * entries.  "all" selects everything, and a NULL @n matches only the
 * special "No_group" selector.
 */
static bool match_metric(const char *n, const char *list)
{
	const char *hit;
	int sel_len;

	if (list == NULL)
		return false;
	if (strcmp(list, "all") == 0)
		return true;
	if (n == NULL)
		return strcasecmp(list, "No_group") == 0;

	sel_len = strlen(list);
	hit = strcasestr(n, list);
	if (hit == NULL)
		return false;

	/* Match must begin a ';'/' '-separated entry... */
	if (hit != n && hit[-1] != ';' && hit[-1] != ' ')
		return false;
	/* ...and span that entire entry. */
	return hit[sel_len] == '\0' || hit[sel_len] == ';';
}

418 419 420 421 422 423
/* A selector may name either the event's metric group or the metric itself. */
static bool match_pe_metric(struct pmu_event *pe, const char *metric)
{
	if (match_metric(pe->metric_group, metric))
		return true;
	return match_metric(pe->metric_name, metric);
}

424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453
/* One metric group shown by metricgroup__print(): name plus its metrics. */
struct mep {
	struct rb_node nd;	/* Node in the groups rblist. */
	const char *name;	/* strdup'd group name; owned by the node. */
	struct strlist *metrics;	/* Metric description strings. */
};

static int mep_cmp(struct rb_node *rb_node, const void *entry)
{
	struct mep *a = container_of(rb_node, struct mep, nd);
	struct mep *b = (struct mep *)entry;

	return strcmp(a->name, b->name);
}

/*
 * rblist allocation callback: deep-copy the template @entry, taking
 * ownership of a fresh name string and an empty metrics strlist.
 * (Cleaned of line-number residue from a copy/paste.)
 */
static struct rb_node *mep_new(struct rblist *rl __maybe_unused,
					const void *entry)
{
	struct mep *me = malloc(sizeof(struct mep));

	if (!me)
		return NULL;
	memcpy(me, entry, sizeof(struct mep));
	me->name = strdup(me->name);
	if (!me->name)
		goto out_me;
	me->metrics = strlist__new(NULL, NULL);
	if (!me->metrics)
		goto out_name;
	return &me->nd;
out_name:
	zfree(&me->name);
out_me:
	free(me);
	return NULL;
}

static struct mep *mep_lookup(struct rblist *groups, const char *name)
{
	struct rb_node *nd;
	struct mep me = {
		.name = name
	};
	nd = rblist__find(groups, &me);
	if (nd)
		return container_of(nd, struct mep, nd);
	rblist__add_node(groups, &me);
	nd = rblist__find(groups, &me);
	if (nd)
		return container_of(nd, struct mep, nd);
	return NULL;
}

/*
 * rblist deletion callback: release the metrics strlist and the owned
 * name before freeing the node.  (Cleaned of copy/paste residue.)
 */
static void mep_delete(struct rblist *rl __maybe_unused,
		       struct rb_node *nd)
{
	struct mep *me = container_of(nd, struct mep, nd);

	strlist__delete(me->metrics);
	zfree(&me->name);
	free(me);
}

/*
 * Print every entry of @metrics: space-separated on one line when @raw,
 * otherwise one indented entry per line.
 */
static void metricgroup__print_strlist(struct strlist *metrics, bool raw)
{
	struct str_node *sn;
	bool first = true;

	strlist__for_each_entry (sn, metrics) {
		if (raw)
			printf("%s%s", first ? "" : " ", sn->s);
		else
			printf("  %s\n", sn->s);
		first = false;
	}
	if (raw)
		putchar('\n');
}

502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567
/*
 * Format one pmu_event's metric for listing: add it either to the flat
 * @metriclist or, when grouping, to each of its ';'-separated groups in
 * @groups.  Returns 0, or -ENOMEM/-1 on allocation failure.
 *
 * Fixes over the original: the asprintf() failure paths no longer leak
 * @omg, and the "details" re-format no longer leaks the first formatted
 * string (it was passed as its own asprintf source and then dropped).
 */
static int metricgroup__print_pmu_event(struct pmu_event *pe,
					bool metricgroups, char *filter,
					bool raw, bool details,
					struct rblist *groups,
					struct strlist *metriclist)
{
	const char *g;
	char *omg, *mg;

	g = pe->metric_group;
	if (!g && pe->metric_name) {
		/* Plain events (with a name) aren't listed as metrics. */
		if (pe->name)
			return 0;
		g = "No_group";
	}

	if (!g)
		return 0;

	mg = strdup(g);

	if (!mg)
		return -ENOMEM;
	omg = mg;
	while ((g = strsep(&mg, ";")) != NULL) {
		struct mep *me;
		char *s;

		g = skip_spaces(g);
		if (*g == 0)
			g = "No_group";
		if (filter && !strstr(g, filter))
			continue;
		if (raw)
			s = (char *)pe->metric_name;
		else {
			if (asprintf(&s, "%s\n%*s%s]",
				     pe->metric_name, 8, "[", pe->desc) < 0) {
				free(omg);
				return -1;
			}
			if (details) {
				char *desc = s;

				if (asprintf(&s, "%s\n%*s%s]",
					     desc, 8, "[", pe->metric_expr) < 0) {
					free(desc);
					free(omg);
					return -1;
				}
				free(desc);
			}
		}

		if (!s)
			continue;

		if (!metricgroups) {
			strlist__add(metriclist, s);
		} else {
			me = mep_lookup(groups, g);
			if (!me)
				continue;
			strlist__add(me->metrics, s);
		}

		/* strlist__add duplicates; free our formatted copy. */
		if (!raw)
			free(s);
	}
	free(omg);

	return 0;
}

568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610
/* Context passed through metricgroup__print_sys_event_iter(). */
struct metricgroup_print_sys_idata {
	struct strlist *metriclist;	/* Flat list used when not grouping. */
	char *filter;			/* Substring filter on group names. */
	struct rblist *groups;		/* Group-name -> metrics rblist. */
	bool metricgroups;		/* Group output by metric group. */
	bool raw;			/* Machine-readable single-line output. */
	bool details;			/* Also include the metric expression. */
};

/* Callback invoked for each sys event whose PMU is present. */
typedef int (*metricgroup_sys_event_iter_fn)(struct pmu_event *pe, void *);

/* Pairs an iterator callback with its opaque data pointer. */
struct metricgroup_iter_data {
	metricgroup_sys_event_iter_fn fn;
	void *data;
};

/*
 * pmu_for_each_sys_event() adapter: forward @pe to the wrapped callback
 * only when it is a metric and a PMU matching its compat id exists.
 */
static int metricgroup__sys_event_iter(struct pmu_event *pe, void *data)
{
	struct metricgroup_iter_data *d = data;
	struct perf_pmu *pmu = NULL;

	if (!pe->metric_expr || !pe->compat)
		return 0;

	while ((pmu = perf_pmu__scan(pmu)) != NULL) {
		if (pmu->id && strcmp(pmu->id, pe->compat) == 0)
			return d->fn(pe, d->data);
	}

	return 0;
}

/* Unpack the print context and hand the sys event to the common printer. */
static int metricgroup__print_sys_event_iter(struct pmu_event *pe, void *data)
{
	struct metricgroup_print_sys_idata *idata = data;

	return metricgroup__print_pmu_event(pe, idata->metricgroups,
					    idata->filter, idata->raw,
					    idata->details, idata->groups,
					    idata->metriclist);
}

611
void metricgroup__print(bool metrics, bool metricgroups, char *filter,
612
			bool raw, bool details)
613
{
614
	struct pmu_events_map *map = perf_pmu__find_map(NULL);
615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630
	struct pmu_event *pe;
	int i;
	struct rblist groups;
	struct rb_node *node, *next;
	struct strlist *metriclist = NULL;

	if (!metricgroups) {
		metriclist = strlist__new(NULL, NULL);
		if (!metriclist)
			return;
	}

	rblist__init(&groups);
	groups.node_new = mep_new;
	groups.node_cmp = mep_cmp;
	groups.node_delete = mep_delete;
631
	for (i = 0; map; i++) {
632 633 634 635 636 637
		pe = &map->table[i];

		if (!pe->name && !pe->metric_group && !pe->metric_name)
			break;
		if (!pe->metric_expr)
			continue;
638 639 640 641
		if (metricgroup__print_pmu_event(pe, metricgroups, filter,
						 raw, details, &groups,
						 metriclist) < 0)
			return;
642 643
	}

644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659
	{
		struct metricgroup_iter_data data = {
			.fn = metricgroup__print_sys_event_iter,
			.data = (void *) &(struct metricgroup_print_sys_idata){
				.metriclist = metriclist,
				.metricgroups = metricgroups,
				.filter = filter,
				.raw = raw,
				.details = details,
				.groups = &groups,
			},
		};

		pmu_for_each_sys_event(metricgroup__sys_event_iter, &data);
	}

660 661 662 663 664 665
	if (!filter || !rblist__empty(&groups)) {
		if (metricgroups && !raw)
			printf("\nMetric Groups:\n\n");
		else if (metrics && !raw)
			printf("\nMetrics:\n\n");
	}
666

667
	for (node = rb_first_cached(&groups.entries); node; node = next) {
668 669 670
		struct mep *me = container_of(node, struct mep, nd);

		if (metricgroups)
671
			printf("%s%s%s", me->name, metrics && !raw ? ":" : "", raw ? " " : "\n");
672 673 674 675 676 677 678 679 680 681
		if (metrics)
			metricgroup__print_strlist(me->metrics, raw);
		next = rb_next(node);
		rblist__remove_node(&groups, node);
	}
	if (!metricgroups)
		metricgroup__print_strlist(metriclist, raw);
	strlist__delete(metriclist);
}

682
static void metricgroup__add_metric_weak_group(struct strbuf *events,
683
					       struct expr_parse_ctx *ctx)
684
{
685
	struct hashmap_entry *cur;
686 687
	size_t bkt;
	bool no_group = true, has_duration = false;
688

689 690
	hashmap__for_each_entry((&ctx->ids), cur, bkt) {
		pr_debug("found event %s\n", (const char *)cur->key);
691 692 693 694 695
		/*
		 * Duration time maps to a software event and can make
		 * groups not count. Always use it outside a
		 * group.
		 */
696
		if (!strcmp(cur->key, "duration_time")) {
697
			has_duration = true;
698 699 700
			continue;
		}
		strbuf_addf(events, "%s%s",
701
			no_group ? "{" : ",",
702
			(const char *)cur->key);
703 704
		no_group = false;
	}
705
	if (!no_group) {
706
		strbuf_addf(events, "}:W");
707 708 709 710
		if (has_duration)
			strbuf_addf(events, ",duration_time");
	} else if (has_duration)
		strbuf_addf(events, "duration_time");
711 712
}

713
static void metricgroup__add_metric_non_group(struct strbuf *events,
714
					      struct expr_parse_ctx *ctx)
715
{
716 717
	struct hashmap_entry *cur;
	size_t bkt;
718
	bool first = true;
719

720 721 722 723 724 725
	hashmap__for_each_entry((&ctx->ids), cur, bkt) {
		if (!first)
			strbuf_addf(events, ",");
		strbuf_addf(events, "%s", (const char *)cur->key);
		first = false;
	}
726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760
}

/*
 * Warn about the NO_NMI_WATCHDOG constraint.  With @foot false, record
 * that @name was split; with @foot true, print the closing hint once if
 * any split happened.
 */
static void metricgroup___watchdog_constraint_hint(const char *name, bool foot)
{
	static bool violate_nmi_constraint;

	if (!foot) {
		pr_warning("Splitting metric group %s into standalone metrics.\n", name);
		violate_nmi_constraint = true;
	} else if (violate_nmi_constraint) {
		pr_warning("Try disabling the NMI watchdog to comply NO_NMI_WATCHDOG metric constraint:\n"
			   "    echo 0 > /proc/sys/kernel/nmi_watchdog\n"
			   "    perf stat ...\n"
			   "    echo 1 > /proc/sys/kernel/nmi_watchdog\n");
	}
}

/*
 * True when @pe's NO_NMI_WATCHDOG constraint applies (the watchdog is
 * enabled); also emits the split warning as a side effect.
 */
static bool metricgroup__has_constraint(struct pmu_event *pe)
{
	const char *constraint = pe->metric_constraint;

	if (!constraint)
		return false;
	if (strcmp(constraint, "NO_NMI_WATCHDOG") != 0)
		return false;
	if (!sysctl__nmi_watchdog_enabled())
		return false;

	metricgroup___watchdog_constraint_hint(pe->metric_name, false);
	return true;
}

761
/*
 * Weak default for the architecture hook: a metric whose expression
 * contains '?' is instantiated this many times.  Architectures with
 * multiple runtime instances override this.
 */
int __weak arch_get_runtimeparam(struct pmu_event *pe __maybe_unused)
{
	return 1;
}

766 767 768 769 770 771 772 773 774 775
/* Arguments bundled for metricgroup__add_metric_sys_event_iter(). */
struct metricgroup_add_iter_data {
	struct list_head *metric_list;	/* Output list of struct metric. */
	const char *metric;		/* Metric/group selector being added. */
	struct metric **m;		/* In/out: current parent metric. */
	struct expr_ids *ids;		/* Recursion-ID pool. */
	int *ret;			/* Result shared with the caller. */
	bool *has_match;		/* Set when any sys event matched. */
	bool metric_no_group;		/* Don't create weak groups. */
};

776
/*
 * Add @pe to @metric_list.  When *mp is NULL this is the parent metric:
 * allocate it, register a recursion ID and insert it (largest event set
 * first).  Otherwise @pe is a referenced metric: record it on the
 * parent's metric_refs and merge its IDs into the parent context.
 * (Cleaned of line-number residue from a copy/paste.)
 */
static int __add_metric(struct list_head *metric_list,
			struct pmu_event *pe,
			bool metric_no_group,
			int runtime,
			struct metric **mp,
			struct expr_id *parent,
			struct expr_ids *ids)
{
	struct metric_ref_node *ref;
	struct metric *m;

	if (*mp == NULL) {
		/*
		 * We got in here for the parent group,
		 * allocate it and put it on the list.
		 */
		m = zalloc(sizeof(*m));
		if (!m)
			return -ENOMEM;

		expr__ctx_init(&m->pctx);
		m->metric_name = pe->metric_name;
		m->metric_expr = pe->metric_expr;
		m->metric_unit = pe->unit;
		m->runtime = runtime;
		m->has_constraint = metric_no_group || metricgroup__has_constraint(pe);
		INIT_LIST_HEAD(&m->metric_refs);
		m->metric_refs_cnt = 0;

		parent = expr_ids__alloc(ids);
		if (!parent) {
			free(m);
			return -EINVAL;
		}

		parent->id = strdup(pe->metric_name);
		if (!parent->id) {
			free(m);
			return -ENOMEM;
		}
		*mp = m;
	} else {
		/*
		 * We got here for the referenced metric, via the
		 * recursive metricgroup__add_metric call, add
		 * it to the parent group.
		 */
		m = *mp;

		ref = malloc(sizeof(*ref));
		if (!ref)
			return -ENOMEM;

		/*
		 * Intentionally passing just const char pointers,
		 * from 'pe' object, so they never go away. We don't
		 * need to change them, so there's no need to create
		 * our own copy.
		 */
		ref->metric_name = pe->metric_name;
		ref->metric_expr = pe->metric_expr;

		list_add(&ref->list, &m->metric_refs);
		m->metric_refs_cnt++;
	}

	/* Force all found IDs in metric to have us as parent ID. */
	WARN_ON_ONCE(!parent);
	m->pctx.parent = parent;

	/*
	 * For both the parent and referenced metrics, we parse
	 * all the metric's IDs and add it to the parent context.
	 */
	if (expr__find_other(pe->metric_expr, NULL, &m->pctx, runtime) < 0) {
		if (m->metric_refs_cnt == 0) {
			expr__ctx_clear(&m->pctx);
			free(m);
			*mp = NULL;
		}
		return -EINVAL;
	}

	/*
	 * We add new group only in the 'parent' call,
	 * so bail out for referenced metric case.
	 */
	if (m->metric_refs_cnt)
		return 0;

	if (list_empty(metric_list))
		list_add(&m->nd, metric_list);
	else {
		struct list_head *pos;

		/* Place the largest groups at the front. */
		list_for_each_prev(pos, metric_list) {
			struct metric *old = list_entry(pos, struct metric, nd);

			if (hashmap__size(&m->pctx.ids) <=
			    hashmap__size(&old->pctx.ids))
				break;
		}
		list_add(&m->nd, pos);
	}

	return 0;
}

885 886 887 888 889
/*
 * Iterate @__map's event table until the all-NULL sentinel entry; does
 * nothing when @__map is NULL.
 */
#define map_for_each_event(__pe, __idx, __map)					\
	if (__map)								\
		for (__idx = 0, __pe = &__map->table[__idx];			\
		     __pe->name || __pe->metric_group || __pe->metric_name;	\
		     __pe = &__map->table[++__idx])

/* As above, but only visit metrics whose group or name matches @__metric. */
#define map_for_each_metric(__pe, __idx, __map, __metric)		\
	map_for_each_event(__pe, __idx, __map)				\
		if (__pe->metric_expr &&				\
		    (match_metric(__pe->metric_group, __metric) ||	\
		     match_metric(__pe->metric_name, __metric)))

897 898 899 900 901 902 903 904 905 906 907 908 909
/* Return the first table entry whose metric name matches @metric, or NULL. */
static struct pmu_event *find_metric(const char *metric, struct pmu_events_map *map)
{
	struct pmu_event *pe;
	int idx;

	map_for_each_event(pe, idx, map) {
		if (match_metric(pe->metric_name, metric))
			return pe;
	}

	return NULL;
}

910
static int recursion_check(struct metric *m, const char *id, struct expr_id **parent,
911 912 913 914 915 916 917 918 919 920 921 922
			   struct expr_ids *ids)
{
	struct expr_id_data *data;
	struct expr_id *p;
	int ret;

	/*
	 * We get the parent referenced by 'id' argument and
	 * traverse through all the parent object IDs to check
	 * if we already processed 'id', if we did, it's recursion
	 * and we fail.
	 */
923
	ret = expr__get_id(&m->pctx, id, &data);
924 925 926
	if (ret)
		return ret;

927
	p = expr_id_data__parent(data);
928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947

	while (p->parent) {
		if (!strcmp(p->id, id)) {
			pr_err("failed: recursion detected for %s\n", id);
			return -1;
		}
		p = p->parent;
	}

	/*
	 * If we are over the limit of static entris, the metric
	 * is too difficult/nested to process, fail as well.
	 */
	p = expr_ids__alloc(ids);
	if (!p) {
		pr_err("failed: too many nested metrics\n");
		return -EINVAL;
	}

	p->id     = strdup(id);
948
	p->parent = expr_id_data__parent(data);
949 950 951 952 953
	*parent   = p;

	return p->id ? 0 : -ENOMEM;
}

954
/* Forward declaration: add_metric() and __resolve_metric() are mutually
 * recursive through referenced metrics. */
static int add_metric(struct list_head *metric_list,
		      struct pmu_event *pe,
		      bool metric_no_group,
		      struct metric **mp,
		      struct expr_id *parent,
		      struct expr_ids *ids);
960

961
static int __resolve_metric(struct metric *m,
962
			    bool metric_no_group,
963
			    struct list_head *metric_list,
964 965
			    struct pmu_events_map *map,
			    struct expr_ids *ids)
966 967 968 969 970 971 972 973 974 975 976 977
{
	struct hashmap_entry *cur;
	size_t bkt;
	bool all;
	int ret;

	/*
	 * Iterate all the parsed IDs and if there's metric,
	 * add it to the context.
	 */
	do {
		all = true;
978
		hashmap__for_each_entry((&m->pctx.ids), cur, bkt) {
979
			struct expr_id *parent;
980 981 982 983 984 985
			struct pmu_event *pe;

			pe = find_metric(cur->key, map);
			if (!pe)
				continue;

986
			ret = recursion_check(m, cur->key, &parent, ids);
987 988 989
			if (ret)
				return ret;

990 991
			all = false;
			/* The metric key itself needs to go out.. */
992
			expr__del_id(&m->pctx, cur->key);
993 994

			/* ... and it gets resolved to the parent context. */
995
			ret = add_metric(metric_list, pe, metric_no_group, &m, parent, ids);
996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011
			if (ret)
				return ret;

			/*
			 * We added new metric to hashmap, so we need
			 * to break the iteration and start over.
			 */
			break;
		}
	} while (!all);

	return 0;
}

static int resolve_metric(bool metric_no_group,
			  struct list_head *metric_list,
1012 1013
			  struct pmu_events_map *map,
			  struct expr_ids *ids)
1014
{
1015
	struct metric *m;
1016 1017
	int err;

1018 1019
	list_for_each_entry(m, metric_list, nd) {
		err = __resolve_metric(m, metric_no_group, metric_list, map, ids);
1020 1021 1022 1023 1024 1025
		if (err)
			return err;
	}
	return 0;
}

1026
static int add_metric(struct list_head *metric_list,
J
Jiri Olsa 已提交
1027
		      struct pmu_event *pe,
1028
		      bool metric_no_group,
1029
		      struct metric **m,
1030 1031
		      struct expr_id *parent,
		      struct expr_ids *ids)
J
Jiri Olsa 已提交
1032
{
1033
	struct metric *orig = *m;
J
Jiri Olsa 已提交
1034 1035 1036 1037 1038
	int ret = 0;

	pr_debug("metric expr %s for %s\n", pe->metric_expr, pe->metric_name);

	if (!strstr(pe->metric_expr, "?")) {
1039
		ret = __add_metric(metric_list, pe, metric_no_group, 1, m, parent, ids);
J
Jiri Olsa 已提交
1040 1041 1042
	} else {
		int j, count;

1043
		count = arch_get_runtimeparam(pe);
J
Jiri Olsa 已提交
1044 1045 1046

		/* This loop is added to create multiple
		 * events depend on count value and add
1047
		 * those events to metric_list.
J
Jiri Olsa 已提交
1048 1049
		 */

1050
		for (j = 0; j < count && !ret; j++, *m = orig)
1051
			ret = __add_metric(metric_list, pe, metric_no_group, j, m, parent, ids);
J
Jiri Olsa 已提交
1052 1053 1054 1055 1056
	}

	return ret;
}

1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079
/*
 * Sys-event iterator callback: add @pe to the metric list when it matches
 * the requested metric/group selector, then resolve any metrics it
 * references.
 */
static int metricgroup__add_metric_sys_event_iter(struct pmu_event *pe,
						  void *data)
{
	struct metricgroup_add_iter_data *d = data;
	int ret;

	/* Skip sys events that don't match the requested selector. */
	if (!match_pe_metric(pe, d->metric))
		return 0;

	ret = add_metric(d->metric_list, pe, d->metric_no_group, d->m, NULL, d->ids);
	if (ret)
		return ret;

	/* Resolve metrics referenced from within this metric's expression. */
	ret = resolve_metric(d->metric_no_group,
				     d->metric_list, NULL, d->ids);
	if (ret)
		return ret;

	*(d->has_match) = true;

	/*
	 * NOTE(review): returns the caller-shared *d->ret rather than the
	 * local ret - confirm *d->ret is initialized before the first
	 * invocation of this callback.
	 */
	return *d->ret;
}

1080 1081
static int metricgroup__add_metric(const char *metric, bool metric_no_group,
				   struct strbuf *events,
1082
				   struct list_head *metric_list,
1083
				   struct pmu_events_map *map)
1084
{
1085
	struct expr_ids ids = { .cnt = 0, };
1086
	struct pmu_event *pe;
1087
	struct metric *m;
1088
	LIST_HEAD(list);
1089 1090
	int i, ret;
	bool has_match = false;
1091

1092 1093
	map_for_each_metric(pe, i, map, metric) {
		has_match = true;
1094
		m = NULL;
1095

1096
		ret = add_metric(&list, pe, metric_no_group, &m, NULL, &ids);
1097
		if (ret)
1098
			goto out;
1099

1100 1101 1102 1103 1104
		/*
		 * Process any possible referenced metrics
		 * included in the expression.
		 */
		ret = resolve_metric(metric_no_group,
1105
				     &list, map, &ids);
J
Jiri Olsa 已提交
1106
		if (ret)
1107
			goto out;
1108
	}
1109

1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125
	{
		struct metricgroup_iter_data data = {
			.fn = metricgroup__add_metric_sys_event_iter,
			.data = (void *) &(struct metricgroup_add_iter_data) {
				.metric_list = &list,
				.metric = metric,
				.metric_no_group = metric_no_group,
				.m = &m,
				.ids = &ids,
				.has_match = &has_match,
				.ret = &ret,
			},
		};

		pmu_for_each_sys_event(metricgroup__sys_event_iter, &data);
	}
1126
	/* End of pmu events. */
1127 1128 1129 1130
	if (!has_match) {
		ret = -EINVAL;
		goto out;
	}
1131

1132
	list_for_each_entry(m, &list, nd) {
1133 1134 1135
		if (events->len > 0)
			strbuf_addf(events, ",");

1136
		if (m->has_constraint) {
1137
			metricgroup__add_metric_non_group(events,
1138
							  &m->pctx);
1139 1140
		} else {
			metricgroup__add_metric_weak_group(events,
1141
							   &m->pctx);
1142 1143
		}
	}
1144

1145 1146 1147 1148 1149
out:
	/*
	 * add to metric_list so that they can be released
	 * even if it's failed
	 */
1150
	list_splice(&list, metric_list);
1151
	expr_ids__exit(&ids);
1152
	return ret;
1153 1154
}

1155 1156
static int metricgroup__add_metric_list(const char *list, bool metric_no_group,
					struct strbuf *events,
1157
					struct list_head *metric_list,
1158
					struct pmu_events_map *map)
1159 1160 1161 1162 1163 1164 1165 1166
{
	char *llist, *nlist, *p;
	int ret = -EINVAL;

	nlist = strdup(list);
	if (!nlist)
		return -ENOMEM;
	llist = nlist;
1167 1168 1169 1170

	strbuf_init(events, 100);
	strbuf_addf(events, "%s", "");

1171
	while ((p = strsep(&llist, ",")) != NULL) {
1172
		ret = metricgroup__add_metric(p, metric_no_group, events,
1173
					      metric_list, map);
1174 1175 1176 1177 1178 1179 1180
		if (ret == -EINVAL) {
			fprintf(stderr, "Cannot find metric or group `%s'\n",
					p);
			break;
		}
	}
	free(nlist);
1181 1182 1183 1184

	if (!ret)
		metricgroup___watchdog_constraint_hint(NULL, true);

1185 1186 1187
	return ret;
}

1188
static void metric__free_refs(struct metric *metric)
1189 1190 1191
{
	struct metric_ref_node *ref, *tmp;

1192
	list_for_each_entry_safe(ref, tmp, &metric->metric_refs, list) {
1193 1194 1195 1196 1197
		list_del(&ref->list);
		free(ref);
	}
}

1198
static void metricgroup__free_metrics(struct list_head *metric_list)
1199
{
1200
	struct metric *m, *tmp;
1201

1202
	list_for_each_entry_safe (m, tmp, metric_list, nd) {
1203 1204 1205 1206
		metric__free_refs(m);
		expr__ctx_clear(&m->pctx);
		list_del_init(&m->nd);
		free(m);
1207 1208 1209
	}
}

1210 1211 1212
static int parse_groups(struct evlist *perf_evlist, const char *str,
			bool metric_no_group,
			bool metric_no_merge,
1213
			struct perf_pmu *fake_pmu,
1214 1215
			struct rblist *metric_events,
			struct pmu_events_map *map)
1216 1217 1218
{
	struct parse_events_error parse_error;
	struct strbuf extra_events;
1219
	LIST_HEAD(metric_list);
1220 1221 1222 1223
	int ret;

	if (metric_events->nr_entries == 0)
		metricgroup__rblist_init(metric_events);
1224
	ret = metricgroup__add_metric_list(str, metric_no_group,
1225
					   &extra_events, &metric_list, map);
1226
	if (ret)
1227
		goto out;
1228
	pr_debug("adding %s\n", extra_events.buf);
1229
	bzero(&parse_error, sizeof(parse_error));
1230
	ret = __parse_events(perf_evlist, extra_events.buf, &parse_error, fake_pmu);
1231
	if (ret) {
1232
		parse_events_print_error(&parse_error, extra_events.buf);
1233 1234
		goto out;
	}
1235
	ret = metricgroup__setup_events(&metric_list, metric_no_merge,
1236
					perf_evlist, metric_events);
1237
out:
1238
	metricgroup__free_metrics(&metric_list);
1239
	strbuf_release(&extra_events);
1240 1241
	return ret;
}
1242

1243 1244 1245 1246 1247 1248 1249
int metricgroup__parse_groups(const struct option *opt,
			      const char *str,
			      bool metric_no_group,
			      bool metric_no_merge,
			      struct rblist *metric_events)
{
	struct evlist *perf_evlist = *(struct evlist **)opt->value;
1250 1251
	struct pmu_events_map *map = perf_pmu__find_map(NULL);

1252 1253

	return parse_groups(perf_evlist, str, metric_no_group,
1254
			    metric_no_merge, NULL, metric_events, map);
1255 1256
}

/*
 * Test-only variant of metricgroup__parse_groups(): parses against a
 * caller-supplied pmu-events @map and the fake PMU, so no events need
 * to exist on the host hardware.
 */
int metricgroup__parse_groups_test(struct evlist *evlist,
				   struct pmu_events_map *map,
				   const char *str,
				   bool metric_no_group,
				   bool metric_no_merge,
				   struct rblist *metric_events)
{
	return parse_groups(evlist, str, metric_no_group, metric_no_merge,
			    &perf_pmu__fake, metric_events, map);
}

/* Report whether @metric names a known metric in the current CPU's map. */
bool metricgroup__has_metric(const char *metric)
{
	struct pmu_events_map *map = perf_pmu__find_map(NULL);
	struct pmu_event *pe;
	int i;

	if (!map)
		return false;

	/* The table ends with an entry whose name fields are all NULL. */
	for (i = 0; ; i++) {
		pe = &map->table[i];
		if (!pe->name && !pe->metric_group && !pe->metric_name)
			break;
		/* Only entries with an expression are metrics. */
		if (pe->metric_expr && match_metric(pe->metric_name, metric))
			return true;
	}
	return false;
}

/*
 * Duplicate the metric events recorded in @old_metric_events into
 * @new_metric_events, remapping every referenced evsel onto the evsel
 * with the same idx in @evlist.  @cgrp is used only for debug output.
 *
 * String fields (expr/name/unit) and the metric_refs contents are
 * shallow-copied: the new entries borrow the same underlying storage
 * as the old ones.
 *
 * Returns 0 on success, -EINVAL if an evsel cannot be found in
 * @evlist, or -ENOMEM on allocation failure.
 */
int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
				    struct rblist *new_metric_events,
				    struct rblist *old_metric_events)
{
	unsigned i;

	for (i = 0; i < rblist__nr_entries(old_metric_events); i++) {
		struct rb_node *nd;
		struct metric_event *old_me, *new_me;
		struct metric_expr *old_expr, *new_expr;
		struct evsel *evsel;
		size_t alloc_size;
		int idx, nr;

		nd = rblist__entry(old_metric_events, i);
		old_me = container_of(nd, struct metric_event, nd);

		/* Map the old anchor evsel onto the new evlist by index. */
		evsel = evlist__find_evsel(evlist, old_me->evsel->idx);
		if (!evsel)
			return -EINVAL;
		new_me = metricgroup__lookup(new_metric_events, evsel, true);
		if (!new_me)
			return -ENOMEM;

		pr_debug("copying metric event for cgroup '%s': %s (idx=%d)\n",
			 cgrp ? cgrp->name : "root", evsel->name, evsel->idx);

		list_for_each_entry(old_expr, &old_me->head, nd) {
			new_expr = malloc(sizeof(*new_expr));
			if (!new_expr)
				return -ENOMEM;

			/* Borrowed pointers: shallow copy is intentional. */
			new_expr->metric_expr = old_expr->metric_expr;
			new_expr->metric_name = old_expr->metric_name;
			new_expr->metric_unit = old_expr->metric_unit;
			new_expr->runtime = old_expr->runtime;

			if (old_expr->metric_refs) {
				/* calculate number of metric_refs (NULL-name terminated) */
				for (nr = 0; old_expr->metric_refs[nr].metric_name; nr++)
					continue;
				alloc_size = sizeof(*new_expr->metric_refs);
				/* nr + 1 keeps the NULL-name terminator. */
				new_expr->metric_refs = calloc(nr + 1, alloc_size);
				if (!new_expr->metric_refs) {
					free(new_expr);
					return -ENOMEM;
				}

				memcpy(new_expr->metric_refs, old_expr->metric_refs,
				       nr * alloc_size);
			} else {
				new_expr->metric_refs = NULL;
			}

			/* calculate number of metric_events (NULL terminated) */
			for (nr = 0; old_expr->metric_events[nr]; nr++)
				continue;
			alloc_size = sizeof(*new_expr->metric_events);
			new_expr->metric_events = calloc(nr + 1, alloc_size);
			if (!new_expr->metric_events) {
				free(new_expr->metric_refs);
				free(new_expr);
				return -ENOMEM;
			}

			/* copy evsel in the same position */
			for (idx = 0; idx < nr; idx++) {
				evsel = old_expr->metric_events[idx];
				evsel = evlist__find_evsel(evlist, evsel->idx);
				if (evsel == NULL) {
					/* Unwind this expr's allocations only. */
					free(new_expr->metric_events);
					free(new_expr->metric_refs);
					free(new_expr);
					return -EINVAL;
				}
				new_expr->metric_events[idx] = evsel;
			}

			list_add(&new_expr->nd, &new_me->head);
		}
	}
	return 0;
}