#ifndef __PERF_EVSEL_H
#define __PERF_EVSEL_H 1

#include <linux/list.h>
#include <stdbool.h>
#include <stddef.h>
#include <linux/perf_event.h>
#include "types.h"
#include "xyarray.h"
#include "cgroup.h"
#include "hist.h"
 
struct perf_counts_values {
	union {
		struct {
			u64 val;
			u64 ena;
			u64 run;
		};
		u64 values[3];
	};
};

struct perf_counts {
	s8		   	  scaled;
	struct perf_counts_values aggr;
	struct perf_counts_values cpu[];
};
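/*
 * A minimal sketch (not part of this header) of how the val/ena/run
 * triplet is typically used: when an event was multiplexed, the raw
 * count can be normalized by the ratio of enabled to running time.
 * It assumes the counter was opened with a read format that reports
 * TOTAL_TIME_ENABLED/TOTAL_TIME_RUNNING, so ena and run are filled in:
 *
 *	struct perf_counts_values *v = &counts->aggr;
 *
 *	if (v->run != 0 && v->run < v->ena)
 *		v->val = (u64)((double)v->val * v->ena / v->run);
 */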

struct perf_evsel;

/*
 * Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there are
 * more than one entry in the evlist.
 */
struct perf_sample_id {
	struct hlist_node 	node;
	u64		 	id;
	struct perf_evsel	*evsel;
};

/** struct perf_evsel - event selector
 *
 * @name - Can be set to retain the original event name passed by the user,
 *         so that when showing results in tools such as 'perf stat', we
 *         show the name used, not some alias.
 */
struct perf_evsel {
	struct list_head	node;
	struct perf_event_attr	attr;
	char			*filter;
	struct xyarray		*fd;
	struct xyarray		*sample_id;
	u64			*id;
	struct perf_counts	*counts;
	struct perf_counts	*prev_raw_counts;
	int			idx;
	u32			ids;
	struct hists		hists;
	char			*name;
	struct event_format	*tp_format;
	union {
		void		*priv;
		off_t		id_offset;
	};
	struct cgroup_sel	*cgrp;
	struct {
		void		*func;
		void		*data;
	} handler;
	struct cpu_map		*cpus;
	unsigned int		sample_size;
	bool			supported;
	bool			needs_swap;
	/* parse modifier helper */
	int			exclude_GH;
	int			nr_members;
	struct perf_evsel	*leader;
	char			*group_name;
};

#define hists_to_evsel(h) container_of(h, struct perf_evsel, hists)
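/*
 * Usage sketch: code that is handed only the embedded struct hists can
 * recover the owning evsel via container_of, since @hists sits directly
 * inside struct perf_evsel:
 *
 *	struct perf_evsel *evsel = hists_to_evsel(hists);
 *	const char *name = perf_evsel__name(evsel);
 */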

struct cpu_map;
struct thread_map;
struct perf_evlist;
struct perf_record_opts;

struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx);
struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx);

struct event_format *event_format__new(const char *sys, const char *name);

void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx);
void perf_evsel__exit(struct perf_evsel *evsel);
void perf_evsel__delete(struct perf_evsel *evsel);

void perf_evsel__config(struct perf_evsel *evsel,
			struct perf_record_opts *opts);

bool perf_evsel__is_cache_op_valid(u8 type, u8 op);

#define PERF_EVSEL__MAX_ALIASES 8

extern const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				       [PERF_EVSEL__MAX_ALIASES];
extern const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_EVSEL__MAX_ALIASES];
extern const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
					      [PERF_EVSEL__MAX_ALIASES];
extern const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX];
extern const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX];
int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size);
const char *perf_evsel__name(struct perf_evsel *evsel);
const char *perf_evsel__group_name(struct perf_evsel *evsel);
int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size);

int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
void perf_evsel__free_fd(struct perf_evsel *evsel);
void perf_evsel__free_id(struct perf_evsel *evsel);
void perf_evsel__free_counts(struct perf_evsel *evsel);
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);

void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
				  enum perf_event_sample_format bit);
void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
				    enum perf_event_sample_format bit);

#define perf_evsel__set_sample_bit(evsel, bit) \
	__perf_evsel__set_sample_bit(evsel, PERF_SAMPLE_##bit)

#define perf_evsel__reset_sample_bit(evsel, bit) \
	__perf_evsel__reset_sample_bit(evsel, PERF_SAMPLE_##bit)
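/*
 * Example: the wrappers only paste the PERF_SAMPLE_ prefix, so
 *
 *	perf_evsel__set_sample_bit(evsel, CPU);
 *
 * expands to __perf_evsel__set_sample_bit(evsel, PERF_SAMPLE_CPU) and is
 * expected to set that bit in attr.sample_type for this evsel.
 */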

void perf_evsel__set_sample_id(struct perf_evsel *evsel);

int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
			   const char *filter);

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus);
int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads);
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads);
void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads);
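/*
 * A hedged sketch of the open/close lifecycle; error handling is omitted,
 * the cpu and thread maps are assumed to come from the caller, and the
 * usual ->nr fields on those maps are assumed:
 *
 *	struct perf_evsel *evsel = perf_evsel__new(&attr, 0);
 *
 *	if (perf_evsel__open(evsel, cpus, threads) < 0)
 *		// report the failure, e.g. via perf_evsel__open_strerror()
 *	...
 *	perf_evsel__close(evsel, cpus->nr, threads->nr);
 *	perf_evsel__delete(evsel);
 */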

struct perf_sample;

void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
			 const char *name);
u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
		       const char *name);

static inline char *perf_evsel__strval(struct perf_evsel *evsel,
				       struct perf_sample *sample,
				       const char *name)
{
	return perf_evsel__rawptr(evsel, sample, name);
}
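/*
 * Example, assuming @evsel is a tracepoint event and @sample has been
 * filled in by perf_evsel__parse_sample(); fields are looked up by name
 * in the tracepoint format data:
 *
 *	pid_t pid  = perf_evsel__intval(evsel, sample, "pid");
 *	char *comm = perf_evsel__strval(evsel, sample, "comm");
 *
 * The field names ("pid", "comm") are only illustrative; they depend on
 * the specific tracepoint.
 */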

struct format_field;

struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name);

#define perf_evsel__match(evsel, t, c)		\
	(evsel->attr.type == PERF_TYPE_##t &&	\
	 evsel->attr.config == PERF_COUNT_##c)
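/*
 * Example: the macro pastes both prefixes, so
 *
 *	perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)
 *
 * is true when attr.type == PERF_TYPE_HARDWARE and
 * attr.config == PERF_COUNT_HW_CPU_CYCLES.
 */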

static inline bool perf_evsel__match2(struct perf_evsel *e1,
				      struct perf_evsel *e2)
{
	return (e1->attr.type == e2->attr.type) &&
	       (e1->attr.config == e2->attr.config);
}

int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale);

/**
 * perf_evsel__read_on_cpu - Read out the results on a CPU and thread
 *
 * @evsel - event selector to read value
 * @cpu - CPU of interest
 * @thread - thread of interest
 */
static inline int perf_evsel__read_on_cpu(struct perf_evsel *evsel,
					  int cpu, int thread)
{
	return __perf_evsel__read_on_cpu(evsel, cpu, thread, false);
}

/**
 * perf_evsel__read_on_cpu_scaled - Read out the results on a CPU and thread, scaled
 *
 * @evsel - event selector to read value
 * @cpu - CPU of interest
 * @thread - thread of interest
 */
static inline int perf_evsel__read_on_cpu_scaled(struct perf_evsel *evsel,
						 int cpu, int thread)
{
	return __perf_evsel__read_on_cpu(evsel, cpu, thread, true);
}

int __perf_evsel__read(struct perf_evsel *evsel, int ncpus, int nthreads,
		       bool scale);

/**
 * perf_evsel__read - Read the aggregate results on all CPUs
 *
 * @evsel - event selector to read value
 * @ncpus - Number of cpus affected, from zero
 * @nthreads - Number of threads affected, from zero
 */
static inline int perf_evsel__read(struct perf_evsel *evsel,
				    int ncpus, int nthreads)
{
	return __perf_evsel__read(evsel, ncpus, nthreads, false);
}

/**
 * perf_evsel__read_scaled - Read the aggregate results on all CPUs, scaled
 *
 * @evsel - event selector to read value
 * @ncpus - Number of cpus affected, from zero
 * @nthreads - Number of threads affected, from zero
 */
static inline int perf_evsel__read_scaled(struct perf_evsel *evsel,
					  int ncpus, int nthreads)
{
	return __perf_evsel__read(evsel, ncpus, nthreads, true);
}
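/*
 * A minimal read-back sketch; it assumes the evsel was opened per
 * cpu/thread as above and that the per-cpu results land in
 * evsel->counts->cpu[] (with the summed-up view in counts->aggr):
 *
 *	if (perf_evsel__read_on_cpu_scaled(evsel, cpu, thread) == 0)
 *		printf("%" PRIu64 "\n", evsel->counts->cpu[cpu].val);
 *
 * The unscaled variants return the raw counts, leaving any scaling to
 * the caller.
 */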

void hists__init(struct hists *hists);

int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
			     struct perf_sample *sample);

static inline struct perf_evsel *perf_evsel__next(struct perf_evsel *evsel)
{
	return list_entry(evsel->node.next, struct perf_evsel, node);
}

static inline bool perf_evsel__is_group_leader(const struct perf_evsel *evsel)
{
	return evsel->leader == evsel;
}

struct perf_attr_details {
	bool freq;
	bool verbose;
	bool event_group;
};

int perf_evsel__fprintf(struct perf_evsel *evsel,
			struct perf_attr_details *details, FILE *fp);

bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
			  char *msg, size_t msgsize);
int perf_evsel__open_strerror(struct perf_evsel *evsel,
			      struct perf_target *target,
			      int err, char *msg, size_t size);

static inline int perf_evsel__group_idx(struct perf_evsel *evsel)
{
	return evsel->idx - evsel->leader->idx;
}

#define for_each_group_member(_evsel, _leader) 					\
for ((_evsel) = list_entry((_leader)->node.next, struct perf_evsel, node); 	\
     (_evsel) && (_evsel)->leader == (_leader);					\
     (_evsel) = list_entry((_evsel)->node.next, struct perf_evsel, node))
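/*
 * Usage sketch: walk an event group starting from its leader.  The
 * iteration relies on group members sitting right after their leader on
 * the evlist, which is what the ->leader check encodes:
 *
 *	struct perf_evsel *pos;
 *
 *	if (perf_evsel__is_group_leader(leader) && leader->nr_members > 1)
 *		for_each_group_member(pos, leader)
 *			printf("%d: %s\n", perf_evsel__group_idx(pos),
 *			       perf_evsel__name(pos));
 */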

#endif /* __PERF_EVSEL_H */