#ifndef __PERF_EVSEL_H
#define __PERF_EVSEL_H 1

#include <linux/list.h>
#include <stdbool.h>
#include <stddef.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include "xyarray.h"
#include "cgroup.h"
#include "hist.h"
#include "symbol.h"

struct perf_counts_values {
	union {
		struct {
			u64 val;
			u64 ena;
			u64 run;
		};
		u64 values[3];
	};
};

struct perf_counts {
	s8		   	  scaled;
	struct perf_counts_values aggr;
	struct perf_counts_values cpu[];
};
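
/*
 * Illustrative sketch (hypothetical helper, not part of the original header):
 * when a counter was time-multiplexed, ena/run record how long it was enabled
 * vs. actually scheduled, and the usual estimate scales the raw value by that
 * ratio, much like the scaled read paths declared below do.
 */
static inline u64 perf_counts_values__example_scaled(struct perf_counts_values *v)
{
	if (v->run == 0)
		return 0;

	return (u64)((double)v->val * v->ena / v->run);
}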

struct perf_evsel;

/*
 * Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there is
 * more than one entry in the evlist.
 */
struct perf_sample_id {
	struct hlist_node 	node;
	u64		 	id;
	struct perf_evsel	*evsel;

	/* Holds total ID period value for PERF_SAMPLE_READ processing. */
	u64			period;
};

/** struct perf_evsel - event selector
 *
 * @name - Can be set to retain the original event name passed by the user,
 *         so that when showing results in tools such as 'perf stat', we
 *         show the name used, not some alias.
 * @id_pos: the position of the event id (PERF_SAMPLE_ID or
 *          PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of
 *          struct sample_event
 * @is_pos: the position (counting backwards) of the event id (PERF_SAMPLE_ID or
 *          PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if sample_id_all
 *          is used there is an id sample appended to non-sample events
 */
struct perf_evsel {
	struct list_head	node;
	struct perf_event_attr	attr;
	char			*filter;
	struct xyarray		*fd;
	struct xyarray		*sample_id;
	u64			*id;
	struct perf_counts	*counts;
	struct perf_counts	*prev_raw_counts;
	int			idx;
	u32			ids;
	struct hists		hists;
	char			*name;
	double			scale;
	const char		*unit;
	struct event_format	*tp_format;
	union {
		void		*priv;
		off_t		id_offset;
	};
	struct cgroup_sel	*cgrp;
	void			*handler;
	struct cpu_map		*cpus;
	unsigned int		sample_size;
	int			id_pos;
	int			is_pos;
	bool 			supported;
	bool 			needs_swap;
	bool			no_aux_samples;
	/* parse modifier helper */
	int			exclude_GH;
	int			nr_members;
	int			sample_read;
	struct perf_evsel	*leader;
	char			*group_name;
};

union u64_swap {
	u64 val64;
	u32 val32[2];
};

#define hists_to_evsel(h) container_of(h, struct perf_evsel, hists)

struct cpu_map;
struct thread_map;
struct perf_evlist;
struct record_opts;

struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx);

static inline struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
{
	return perf_evsel__new_idx(attr, 0);
}

struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx);

static inline struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name)
{
	return perf_evsel__newtp_idx(sys, name, 0);
}
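
/*
 * Illustrative sketch (hypothetical helper, not part of the original API):
 * build an event selector for the generic hardware cycles counter using the
 * attribute enums from <linux/perf_event.h>.  Returns NULL on failure, like
 * perf_evsel__new() itself.
 */
static inline struct perf_evsel *perf_evsel__example_new_cycles(void)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
	};

	return perf_evsel__new(&attr);
}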

struct event_format *event_format__new(const char *sys, const char *name);

void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx);
void perf_evsel__exit(struct perf_evsel *evsel);
void perf_evsel__delete(struct perf_evsel *evsel);

void perf_evsel__config(struct perf_evsel *evsel,
			struct record_opts *opts);

int __perf_evsel__sample_size(u64 sample_type);
void perf_evsel__calc_id_pos(struct perf_evsel *evsel);

bool perf_evsel__is_cache_op_valid(u8 type, u8 op);

#define PERF_EVSEL__MAX_ALIASES 8

extern const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				       [PERF_EVSEL__MAX_ALIASES];
extern const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_EVSEL__MAX_ALIASES];
extern const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
					      [PERF_EVSEL__MAX_ALIASES];
extern const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX];
extern const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX];
int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size);
const char *perf_evsel__name(struct perf_evsel *evsel);

const char *perf_evsel__group_name(struct perf_evsel *evsel);
int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size);

int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus);
void perf_evsel__free_fd(struct perf_evsel *evsel);
void perf_evsel__free_id(struct perf_evsel *evsel);
void perf_evsel__free_counts(struct perf_evsel *evsel);
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);

void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
				  enum perf_event_sample_format bit);
void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
				    enum perf_event_sample_format bit);

#define perf_evsel__set_sample_bit(evsel, bit) \
	__perf_evsel__set_sample_bit(evsel, PERF_SAMPLE_##bit)

#define perf_evsel__reset_sample_bit(evsel, bit) \
	__perf_evsel__reset_sample_bit(evsel, PERF_SAMPLE_##bit)
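
/*
 * Illustrative sketch (hypothetical helper): the macros take the bare sample
 * field name and paste on the PERF_SAMPLE_ prefix, so requesting extra sample
 * fields on an evsel looks like this.
 */
static inline void perf_evsel__example_want_cpu_time(struct perf_evsel *evsel)
{
	perf_evsel__set_sample_bit(evsel, CPU);
	perf_evsel__set_sample_bit(evsel, TIME);
}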

void perf_evsel__set_sample_id(struct perf_evsel *evsel,
			       bool use_sample_identifier);

int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
			   const char *filter);
int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads);

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus);
int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads);
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads);
void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads);
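
/*
 * Illustrative sketch (hypothetical helper): open an already-configured evsel
 * on every CPU in @cpus and tear it down again.  Real callers keep the fds
 * open to read or mmap them; a per-cpu open uses a single dummy thread, hence
 * nthreads of 1 in the close.  @ncpus is passed separately because struct
 * cpu_map is only forward-declared here.
 */
static inline int perf_evsel__example_open_close(struct perf_evsel *evsel,
						 struct cpu_map *cpus, int ncpus)
{
	int err = perf_evsel__open_per_cpu(evsel, cpus);

	if (err < 0)
		return err;

	perf_evsel__close(evsel, ncpus, 1);
	return 0;
}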

struct perf_sample;

void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
			 const char *name);
u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
		       const char *name);

static inline char *perf_evsel__strval(struct perf_evsel *evsel,
				       struct perf_sample *sample,
				       const char *name)
{
	return perf_evsel__rawptr(evsel, sample, name);
}
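
/*
 * Illustrative sketch (hypothetical helper, field names are examples only):
 * for a tracepoint evsel such as sched:sched_switch, pull a string and an
 * integer field out of an already-parsed sample by name.
 */
static inline u64 perf_evsel__example_next_pid(struct perf_evsel *evsel,
					       struct perf_sample *sample)
{
	const char *comm = perf_evsel__strval(evsel, sample, "next_comm");

	return comm ? perf_evsel__intval(evsel, sample, "next_pid") : 0;
}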

struct format_field;

struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name);

#define perf_evsel__match(evsel, t, c)		\
	(evsel->attr.type == PERF_TYPE_##t &&	\
	 evsel->attr.config == PERF_COUNT_##c)
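
/*
 * Illustrative sketch (hypothetical helper): perf_evsel__match() compares the
 * attribute against the generic PERF_TYPE_/PERF_COUNT_ enums, e.g. testing
 * for the software task-clock event.
 */
static inline bool perf_evsel__example_is_task_clock(struct perf_evsel *evsel)
{
	return perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK);
}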

static inline bool perf_evsel__match2(struct perf_evsel *e1,
				      struct perf_evsel *e2)
{
	return (e1->attr.type == e2->attr.type) &&
	       (e1->attr.config == e2->attr.config);
}

#define perf_evsel__cmp(a, b)			\
	((a) &&					\
	 (b) &&					\
	 (a)->attr.type == (b)->attr.type &&	\
	 (a)->attr.config == (b)->attr.config)

int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale);

/**
 * perf_evsel__read_on_cpu - Read out the results on a CPU and thread
 *
 * @evsel - event selector to read value
 * @cpu - CPU of interest
 * @thread - thread of interest
 */
static inline int perf_evsel__read_on_cpu(struct perf_evsel *evsel,
					  int cpu, int thread)
{
	return __perf_evsel__read_on_cpu(evsel, cpu, thread, false);
}

/**
 * perf_evsel__read_on_cpu_scaled - Read out the results on a CPU and thread, scaled
 *
 * @evsel - event selector to read value
 * @cpu - CPU of interest
 * @thread - thread of interest
 */
static inline int perf_evsel__read_on_cpu_scaled(struct perf_evsel *evsel,
						 int cpu, int thread)
{
	return __perf_evsel__read_on_cpu(evsel, cpu, thread, true);
}
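
/*
 * Illustrative sketch (hypothetical helper): a successful per-cpu read lands
 * in evsel->counts->cpu[cpu]; return its raw value, or 0 on error.
 */
static inline u64 perf_evsel__example_cpu_val(struct perf_evsel *evsel, int cpu)
{
	if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0)
		return 0;

	return evsel->counts->cpu[cpu].val;
}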

int __perf_evsel__read(struct perf_evsel *evsel, int ncpus, int nthreads,
		       bool scale);

/**
 * perf_evsel__read - Read the aggregate results on all CPUs
 *
 * @evsel - event selector to read value
 * @ncpus - Number of cpus affected, from zero
 * @nthreads - Number of threads affected, from zero
 */
static inline int perf_evsel__read(struct perf_evsel *evsel,
				    int ncpus, int nthreads)
{
	return __perf_evsel__read(evsel, ncpus, nthreads, false);
}

/**
 * perf_evsel__read_scaled - Read the aggregate results on all CPUs, scaled
 *
 * @evsel - event selector to read value
 * @ncpus - Number of cpus affected, from zero
 * @nthreads - Number of threads affected, from zero
 */
static inline int perf_evsel__read_scaled(struct perf_evsel *evsel,
					  int ncpus, int nthreads)
{
	return __perf_evsel__read(evsel, ncpus, nthreads, true);
}
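
/*
 * Illustrative sketch (hypothetical helper): the aggregate read sums all
 * cpu/thread fds into evsel->counts->aggr; return the scaled total, or 0 on
 * error.
 */
static inline u64 perf_evsel__example_total(struct perf_evsel *evsel,
					    int ncpus, int nthreads)
{
	if (perf_evsel__read_scaled(evsel, ncpus, nthreads) < 0)
		return 0;

	return evsel->counts->aggr.val;
}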

void hists__init(struct hists *hists);

int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
			     struct perf_sample *sample);

static inline struct perf_evsel *perf_evsel__next(struct perf_evsel *evsel)
{
	return list_entry(evsel->node.next, struct perf_evsel, node);
}

static inline struct perf_evsel *perf_evsel__prev(struct perf_evsel *evsel)
{
	return list_entry(evsel->node.prev, struct perf_evsel, node);
}

/**
 * perf_evsel__is_group_leader - Return whether given evsel is a leader event
 *
 * @evsel - evsel selector to be tested
 *
 * Return %true if @evsel is a group leader or a stand-alone event
 */
static inline bool perf_evsel__is_group_leader(const struct perf_evsel *evsel)
{
	return evsel->leader == evsel;
}

/**
 * perf_evsel__is_group_event - Return whether given evsel is a group event
 *
 * @evsel - evsel selector to be tested
 *
 * Return %true iff event group view is enabled and @evsel is an actual group
 * leader which has other members in the group
 */
static inline bool perf_evsel__is_group_event(struct perf_evsel *evsel)
{
	if (!symbol_conf.event_group)
		return false;

	return perf_evsel__is_group_leader(evsel) && evsel->nr_members > 1;
}

/**
 * perf_evsel__is_function_event - Return whether given evsel is a function
 * trace event
 *
 * @evsel - evsel selector to be tested
 *
 * Return %true if event is function trace event
 */
static inline bool perf_evsel__is_function_event(struct perf_evsel *evsel)
{
#define FUNCTION_EVENT "ftrace:function"

	return evsel->name &&
	       !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT));

#undef FUNCTION_EVENT
}

struct perf_attr_details {
	bool freq;
	bool verbose;
	bool event_group;
};

int perf_evsel__fprintf(struct perf_evsel *evsel,
			struct perf_attr_details *details, FILE *fp);

bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
			  char *msg, size_t msgsize);
int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
			      int err, char *msg, size_t size);

static inline int perf_evsel__group_idx(struct perf_evsel *evsel)
{
	return evsel->idx - evsel->leader->idx;
}

#define for_each_group_member(_evsel, _leader) 					\
for ((_evsel) = list_entry((_leader)->node.next, struct perf_evsel, node); 	\
     (_evsel) && (_evsel)->leader == (_leader);					\
     (_evsel) = list_entry((_evsel)->node.next, struct perf_evsel, node))
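
/*
 * Illustrative sketch (hypothetical helper): count the non-leader members of
 * a group by walking the evlist entries that still point back to @leader.
 */
static inline int perf_evsel__example_nr_group_members(struct perf_evsel *leader)
{
	struct perf_evsel *pos;
	int n = 0;

	for_each_group_member(pos, leader)
		n++;

	return n;
}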

#endif /* __PERF_EVSEL_H */