#ifndef __PERF_HIST_H
#define __PERF_HIST_H

#include <linux/types.h>
#include "callchain.h"

extern struct callchain_param callchain_param;

struct hist_entry;
struct addr_location;
struct symbol;
struct rb_root;

struct objdump_line {
	struct list_head node;
	s64		 offset;
	char		 *line;
};

void objdump_line__free(struct objdump_line *self);
struct objdump_line *objdump__get_next_ip_line(struct list_head *head,
					       struct objdump_line *pos);
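
/*
 * Illustrative sketch, not part of the perf API: objdump_line nodes hang off
 * a plain list_head (the one filled by hist_entry__annotate() below), so
 * walking the disassembly is an ordinary list traversal.  This assumes
 * list_for_each_entry() from <linux/list.h> is reachable via the includes
 * above.
 */
static inline int objdump_lines__example_count(struct list_head *head)
{
	struct objdump_line *pos;
	int n = 0;

	list_for_each_entry(pos, head, node)
		n++;
	return n;
}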

struct sym_hist {
	u64		sum;
	u64		ip[0];
};
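
/*
 * Illustrative sketch, not part of the perf API: because sym_hist ends in a
 * zero-length array, the per-address counters are allocated together with
 * the header in a single chunk.  'sym_len' is a placeholder for the number
 * of addressable units the symbol spans, and calloc() is assumed to be
 * available to the includer.
 */
static inline struct sym_hist *sym_hist__example_alloc(size_t sym_len)
{
	return calloc(1, sizeof(struct sym_hist) + sym_len * sizeof(u64));
}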

struct sym_ext {
	struct rb_node	node;
	double		percent;
	char		*path;
};

struct sym_priv {
	struct sym_hist	*hist;
	struct sym_ext	*ext;
};

/*
 * The kernel collects the number of events it couldn't send in a stretch and
 * when possible sends this number in a PERF_RECORD_LOST event. The number of
 * such "chunks" of lost events is stored in .nr_events[PERF_RECORD_LOST] while
 * total_lost tells exactly how many events the kernel in fact lost, i.e. it is
 * the sum of all struct lost_event.lost fields reported.
 *
 * The total_period is needed because by default auto-freq is used, so
 * multiplying nr_events[PERF_RECORD_SAMPLE] by a fixed frequency cannot yield
 * the total number of low level events; it is necessary to sum all struct
 * sample_event.period fields and stash the result in total_period.
 */
struct events_stats {
	u64 total_period;
	u64 total_lost;
	u32 nr_events[PERF_RECORD_HEADER_MAX];
	u32 nr_unknown_events;
};
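
/*
 * Illustrative sketch, not part of the perf API: how the counters described
 * in the comment above would typically be updated when a PERF_RECORD_LOST
 * record is processed.  'nr_lost' stands for that record's lost_event.lost
 * field; PERF_RECORD_LOST is assumed to be visible via the includes above.
 */
static inline void events_stats__example_lost(struct events_stats *stats,
					      u64 nr_lost)
{
	stats->nr_events[PERF_RECORD_LOST]++;	/* one more "chunk" of losses */
	stats->total_lost += nr_lost;		/* exact number of lost events */
}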

struct hists {
	struct rb_node		rb_node;
	struct rb_root		entries;
	u64			nr_entries;
	struct events_stats	stats;
	u64			config;
	u64			event_stream;
	u32			type;
	u32			max_sym_namelen;
};

struct hist_entry *__hists__add_entry(struct hists *self,
				      struct addr_location *al,
				      struct symbol *parent, u64 period);
extern int64_t hist_entry__cmp(struct hist_entry *, struct hist_entry *);
extern int64_t hist_entry__collapse(struct hist_entry *, struct hist_entry *);
int hist_entry__fprintf(struct hist_entry *self, struct hists *pair_hists,
			bool show_displacement, long displacement, FILE *fp,
			u64 total);
int hist_entry__snprintf(struct hist_entry *self, char *bf, size_t size,
			 struct hists *pair_hists, bool show_displacement,
			 long displacement, bool color, u64 total);
void hist_entry__free(struct hist_entry *);

void hists__output_resort(struct hists *self);
void hists__collapse_resort(struct hists *self);

void hists__inc_nr_events(struct hists *self, u32 type);
size_t hists__fprintf_nr_events(struct hists *self, FILE *fp);

size_t hists__fprintf(struct hists *self, struct hists *pair,
		      bool show_displacement, FILE *fp);
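
/*
 * Illustrative usage sketch, not part of this header's API: after one entry
 * per sample has been added with __hists__add_entry(), a report typically
 * collapses entries that compare equal, resorts the result by period and
 * then prints it.  Writing to stdout here is just an example choice and
 * assumes <stdio.h> is reachable via the includes above.
 */
static inline size_t hists__example_report(struct hists *self)
{
	hists__collapse_resort(self);	/* merge entries equal under the sort keys */
	hists__output_resort(self);	/* order entries by period for display */
	return hists__fprintf(self, NULL, false, stdout);
}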

int hist_entry__inc_addr_samples(struct hist_entry *self, u64 ip);
int hist_entry__annotate(struct hist_entry *self, struct list_head *head);

void hists__filter_by_dso(struct hists *self, const struct dso *dso);
void hists__filter_by_thread(struct hists *self, const struct thread *thread);
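
/*
 * Illustrative sketch, not part of the perf API: narrowing an already sorted
 * tree to a single DSO and printing the filtered view again.  'dso' and 'fp'
 * are placeholders for whatever the caller selected.
 */
static inline size_t hists__example_filtered_print(struct hists *self,
						   const struct dso *dso,
						   FILE *fp)
{
	hists__filter_by_dso(self, dso);
	return hists__fprintf(self, NULL, false, fp);
}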

#ifdef NO_NEWT_SUPPORT
static inline int hists__browse(struct hists *self __used,
				const char *helpline __used,
				const char *ev_name __used)
{
	return 0;
}

static inline int hists__tui_browse_tree(struct rb_root *self __used,
					 const char *help __used)
{
	return 0;
}

static inline int hist_entry__tui_annotate(struct hist_entry *self __used)
{
	return 0;
}
#define KEY_LEFT -1
#define KEY_RIGHT -2
#else
#include <newt.h>
int hists__browse(struct hists *self, const char *helpline,
		  const char *ev_name);
int hist_entry__tui_annotate(struct hist_entry *self);

#define KEY_LEFT NEWT_KEY_LEFT
#define KEY_RIGHT NEWT_KEY_RIGHT

int hists__tui_browse_tree(struct rb_root *self, const char *help);
#endif
#endif	/* __PERF_HIST_H */