#ifndef __PERF_EVSEL_H
#define __PERF_EVSEL_H 1

#include <linux/list.h>
#include <stdbool.h>
#include "../../../include/linux/perf_event.h"
#include "types.h"
#include "xyarray.h"
#include "cgroup.h"
#include "hist.h"
 
struct perf_counts_values {
	union {
		struct {
			u64 val;
			u64 ena;
			u64 run;
		};
		u64 values[3];
	};
};

struct perf_counts {
	s8		   	  scaled;
	struct perf_counts_values aggr;
	struct perf_counts_values cpu[];
};
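
/*
 * Illustrative sketch, not part of the original header: when the kernel had
 * to multiplex the counter, a scaled value is conventionally derived from the
 * three fields as val * ena / run. The helper name below is hypothetical.
 */
static inline u64 example_scaled_value(struct perf_counts_values *v)
{
	if (!v->run)
		return v->val;

	return (u64)((double)v->val * (double)v->ena / (double)v->run);
}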

struct perf_evsel;

/*
 * Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there is
 * more than one entry in the evlist.
 */
struct perf_sample_id {
	struct hlist_node 	node;
	u64		 	id;
	struct perf_evsel	*evsel;
};

/** struct perf_evsel - event selector
 *
 * @name - Can be set to retain the original event name passed by the user,
 *         so that when showing results in tools such as 'perf stat', we
 *         show the name used, not some alias.
 */
struct perf_evsel {
	struct list_head	node;
	struct perf_event_attr	attr;
	char			*filter;
	struct xyarray		*fd;
	struct xyarray		*sample_id;
	u64			*id;
	struct perf_counts	*counts;
	int			idx;
	int			ids;
	struct hists		hists;
	char			*name;
	struct event_format	*tp_format;
	union {
		void		*priv;
		off_t		id_offset;
	};
	struct cgroup_sel	*cgrp;
	struct {
		void		*func;
		void		*data;
	} handler;
	unsigned int		sample_size;
	bool 			supported;
};

struct cpu_map;
struct thread_map;
struct perf_evlist;
struct perf_record_opts;

struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx);
void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx);
void perf_evsel__exit(struct perf_evsel *evsel);
void perf_evsel__delete(struct perf_evsel *evsel);
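
/*
 * Illustrative sketch, not part of the original header: typical lifecycle of
 * an event selector for the hardware cycles counter. Error handling is
 * elided and the helper name is hypothetical.
 */
static inline struct perf_evsel *example_cycles_evsel(void)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
	};

	return perf_evsel__new(&attr, 0);	/* release with perf_evsel__delete() */
}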

void perf_evsel__config(struct perf_evsel *evsel,
			struct perf_record_opts *opts,
			struct perf_evsel *first);

bool perf_evsel__is_cache_op_valid(u8 type, u8 op);

#define PERF_EVSEL__MAX_ALIASES 8

extern const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				       [PERF_EVSEL__MAX_ALIASES];
extern const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_EVSEL__MAX_ALIASES];
extern const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
					      [PERF_EVSEL__MAX_ALIASES];
int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size);
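
/*
 * Illustrative sketch, not part of the original header: the alias tables
 * above feed __perf_evsel__hw_cache_type_op_res_name(), which formats names
 * such as "L1-dcache-load-misses". The wrapper below is hypothetical.
 */
static inline int example_l1d_load_miss_name(char *bf, size_t size)
{
	return __perf_evsel__hw_cache_type_op_res_name(PERF_COUNT_HW_CACHE_L1D,
						       PERF_COUNT_HW_CACHE_OP_READ,
						       PERF_COUNT_HW_CACHE_RESULT_MISS,
						       bf, size);
}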
const char *perf_evsel__name(struct perf_evsel *evsel);

int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
void perf_evsel__free_fd(struct perf_evsel *evsel);
void perf_evsel__free_id(struct perf_evsel *evsel);
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
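
/*
 * Illustrative sketch, not part of the original header: the per-evsel arrays
 * are sized by (ncpus x nthreads), and closing the counter fds is expected to
 * be paired with releasing the fd xyarray, roughly as perf_evsel__close()
 * below is meant to do.
 */
static inline void example_close_and_free(struct perf_evsel *evsel,
					  int ncpus, int nthreads)
{
	perf_evsel__close_fd(evsel, ncpus, nthreads);	/* close each counter fd */
	perf_evsel__free_fd(evsel);			/* then drop the storage */
}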

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus, bool group,
			     struct xyarray *group_fds);
int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads, bool group,
				struct xyarray *group_fds);
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads, bool group,
		     struct xyarray *group_fds);
void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads);
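
/*
 * Illustrative sketch, not part of the original header: opening one evsel on
 * every CPU of a cpu_map as a standalone (non-group) event, i.e. with no
 * group leader fds supplied. The wrapper name is hypothetical.
 */
static inline int example_open_on_cpus(struct perf_evsel *evsel,
				       struct cpu_map *cpus)
{
	return perf_evsel__open_per_cpu(evsel, cpus, false, NULL);
}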

#define perf_evsel__match(evsel, t, c)		\
	(evsel->attr.type == PERF_TYPE_##t &&	\
	 evsel->attr.config == PERF_COUNT_##c)
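
/*
 * Illustrative sketch, not part of the original header: perf_evsel__match()
 * is meant for checks such as "is this the hardware cycles counter?".
 */
static inline bool example_is_cycles(struct perf_evsel *evsel)
{
	return perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES);
}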

int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale);

/**
 * perf_evsel__read_on_cpu - Read out the results on a CPU and thread
 *
 * @evsel - event selector to read the value from
 * @cpu - CPU of interest
 * @thread - thread of interest
 */
static inline int perf_evsel__read_on_cpu(struct perf_evsel *evsel,
					  int cpu, int thread)
{
	return __perf_evsel__read_on_cpu(evsel, cpu, thread, false);
}

/**
 * perf_evsel__read_on_cpu_scaled - Read out the results on a CPU and thread, scaled
 *
 * @evsel - event selector to read the value from
 * @cpu - CPU of interest
 * @thread - thread of interest
 */
static inline int perf_evsel__read_on_cpu_scaled(struct perf_evsel *evsel,
						 int cpu, int thread)
{
	return __perf_evsel__read_on_cpu(evsel, cpu, thread, true);
}

int __perf_evsel__read(struct perf_evsel *evsel, int ncpus, int nthreads,
		       bool scale);

/**
 * perf_evsel__read - Read the aggregate results on all CPUs
 *
 * @evsel - event selector to read the value from
 * @ncpus - Number of cpus affected, from zero
 * @nthreads - Number of threads affected, from zero
 */
static inline int perf_evsel__read(struct perf_evsel *evsel,
				    int ncpus, int nthreads)
{
	return __perf_evsel__read(evsel, ncpus, nthreads, false);
}

/**
 * perf_evsel__read_scaled - Read the aggregate results on all CPUs, scaled
 *
 * @evsel - event selector to read the value from
 * @ncpus - Number of cpus affected, from zero
 * @nthreads - Number of threads affected, from zero
 */
static inline int perf_evsel__read_scaled(struct perf_evsel *evsel,
					  int ncpus, int nthreads)
{
	return __perf_evsel__read(evsel, ncpus, nthreads, true);
}
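
/*
 * Illustrative sketch, not part of the original header: a successful
 * per-cpu/per-thread read is expected to land in evsel->counts->cpu[cpu],
 * from which the raw value can be picked up. The helper name is hypothetical.
 */
static inline u64 example_read_raw_value(struct perf_evsel *evsel,
					 int cpu, int thread)
{
	if (perf_evsel__read_on_cpu(evsel, cpu, thread) < 0)
		return 0;

	return evsel->counts->cpu[cpu].val;
}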

void hists__init(struct hists *hists);

int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
			     struct perf_sample *sample, bool swapped);
#endif /* __PERF_EVSEL_H */