/*
 *  Performance counters:
 *
 *   Copyright(C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *   Copyright(C) 2008, Red Hat, Inc., Ingo Molnar
 *
 *  Data type definitions, declarations, prototypes.
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_COUNTER_H
#define _LINUX_PERF_COUNTER_H

#include <asm/atomic.h>

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct task_struct;

/*
 * User-space ABI bits:
 */

/*
 * Generalized performance counter event types, used by the hw_event.type
 * parameter of the sys_perf_counter_open() syscall:
 */
enum hw_event_types {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_CYCLES		=  0,
	PERF_COUNT_INSTRUCTIONS		=  1,
	PERF_COUNT_CACHE_REFERENCES	=  2,
	PERF_COUNT_CACHE_MISSES		=  3,
	PERF_COUNT_BRANCH_INSTRUCTIONS	=  4,
	PERF_COUNT_BRANCH_MISSES	=  5,

	/*
	 * Special "software" counters provided by the kernel, even if
	 * the hardware does not support performance counters. These
	 * counters measure various physical and software events of the
	 * kernel (and allow them to be profiled as well):
	 */
	PERF_COUNT_CPU_CLOCK		= -1,
	PERF_COUNT_TASK_CLOCK		= -2,
	/*
	 * Future software events:
	 */
	/* PERF_COUNT_PAGE_FAULTS	= -3,
	   PERF_COUNT_CONTEXT_SWITCHES	= -4, */
};
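
/*
 * A minimal sketch, assuming ->raw is not set: a negative hw_event.type
 * selects one of the software counters above, a non-negative one selects
 * a generalized hardware event. The helper below is purely illustrative.
 */
#if 0
static inline int perf_event_type_is_software(s64 type)
{
	return type < 0;
}
#endif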

/*
 * IRQ-notification data record type:
 */
enum perf_counter_record_type {
	PERF_RECORD_SIMPLE		=  0,
	PERF_RECORD_IRQ			=  1,
	PERF_RECORD_GROUP		=  2,
};

/*
 * Hardware event to monitor via a performance monitoring counter:
 */
struct perf_counter_hw_event {
	s64			type;

	u64			irq_period;
	u32			record_type;

	u32			disabled     :  1, /* off by default */
				nmi	     :  1, /* NMI sampling   */
				raw	     :  1, /* raw event type */
				__reserved_1 : 29;

	u64			__reserved_2;
};
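
/*
 * Illustrative user-space sketch of filling in a hw_event and opening a
 * counter. The wrapper below assumes a syscall signature of
 * (hw_event ptr, pid, cpu, group_fd) and a __NR_perf_counter_open number;
 * neither is defined by this header.
 */
#if 0
static int open_cycle_counter(void)
{
	struct perf_counter_hw_event hw_event = {
		.type		= PERF_COUNT_CYCLES,
		.irq_period	= 1000000,		/* IRQ every 10^6 cycles */
		.record_type	= PERF_RECORD_IRQ,
		.nmi		= 1,			/* sample from NMI context */
	};

	/* pid 0: current task, cpu -1: any CPU, group_fd -1: no group */
	return syscall(__NR_perf_counter_open, &hw_event, 0, -1, -1);
}
#endif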

/*
 * Kernel-internal data types:
 */

/**
 * struct hw_perf_counter - performance counter hardware details:
 */
struct hw_perf_counter {
	u64				config;
	unsigned long			config_base;
	unsigned long			counter_base;
	int				nmi;
	unsigned int			idx;
	u64				prev_count;
	u64				irq_period;
	s32				next_count;
};
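
/*
 * A minimal sketch of how architecture code might use ->prev_count when
 * reading the hardware: return the delta accumulated since the last read.
 * read_pmc() stands in for a model-specific counter read and is not part
 * of this header.
 */
#if 0
static u64 hw_perf_counter_delta(struct hw_perf_counter *hwc)
{
	u64 now = read_pmc(hwc->idx);	/* hypothetical PMC read */
	u64 delta = now - hwc->prev_count;

	hwc->prev_count = now;
	return delta;
}
#endif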

/*
 * Hardcoded buffer length limit for now, for IRQ-fed events:
 */
#define PERF_DATA_BUFLEN		2048

/**
 * struct perf_data - performance counter IRQ data sampling ...
 */
struct perf_data {
	int				len;
	int				rd_idx;
	int				overrun;
	u8				data[PERF_DATA_BUFLEN];
};
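
/*
 * A minimal sketch of how read() might drain such a buffer, assuming
 * ->len counts valid bytes and ->rd_idx is the consumer offset; a real
 * implementation would use copy_to_user() instead of memcpy().
 */
#if 0
static int perf_data_drain(struct perf_data *data, u8 *buf, int count)
{
	int avail = data->len - data->rd_idx;

	if (count > avail)
		count = avail;

	memcpy(buf, data->data + data->rd_idx, count);
	data->rd_idx += count;

	return count;
}
#endif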

struct perf_counter;

/**
 * struct hw_perf_counter_ops - performance counter hw ops
 */
struct hw_perf_counter_ops {
	void (*hw_perf_counter_enable)	(struct perf_counter *counter);
	void (*hw_perf_counter_disable)	(struct perf_counter *counter);
	void (*hw_perf_counter_read)	(struct perf_counter *counter);
};
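
/*
 * A sketch of how architecture code might provide these ops and hand them
 * out from hw_perf_counter_init(); the x86-style names below are only
 * placeholders.
 */
#if 0
static void x86_perf_counter_enable(struct perf_counter *counter)  { /* program the PMC */ }
static void x86_perf_counter_disable(struct perf_counter *counter) { /* stop the PMC */ }
static void x86_perf_counter_read(struct perf_counter *counter)    { /* fold hw value into ->count */ }

static const struct hw_perf_counter_ops x86_perf_counter_ops = {
	.hw_perf_counter_enable		= x86_perf_counter_enable,
	.hw_perf_counter_disable	= x86_perf_counter_disable,
	.hw_perf_counter_read		= x86_perf_counter_read,
};
#endif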

/**
 * enum perf_counter_active_state - the states of a counter
 */
enum perf_counter_active_state {
	PERF_COUNTER_STATE_OFF		= -1,
	PERF_COUNTER_STATE_INACTIVE	=  0,
	PERF_COUNTER_STATE_ACTIVE	=  1,
};

/**
 * struct perf_counter - performance counter kernel representation:
 */
struct perf_counter {
	struct list_head		list_entry;
	struct list_head		sibling_list;
	struct perf_counter		*group_leader;
	const struct hw_perf_counter_ops *hw_ops;

	enum perf_counter_active_state	state;
#if BITS_PER_LONG == 64
	atomic64_t			count;
#else
	atomic_t			count32[2];
#endif
	struct perf_counter_hw_event	hw_event;
	struct hw_perf_counter		hw;

	struct perf_counter_context	*ctx;
	struct task_struct		*task;

	/*
	 * Protect attach/detach:
	 */
	struct mutex			mutex;

	int				oncpu;
	int				cpu;

	/* read() / irq related data */
	wait_queue_head_t		waitq;
	/* optional: for NMIs */
	int				wakeup_pending;
	struct perf_data		*irqdata;
	struct perf_data		*usrdata;
	struct perf_data		data[2];
};
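
/*
 * A sketch of what atomic64_counter_read() (declared further below) might
 * look like given the two representations of ->count above; the 32-bit
 * half ordering and the lack of torn-read protection here are assumptions.
 */
#if 0
u64 atomic64_counter_read(struct perf_counter *counter)
{
#if BITS_PER_LONG == 64
	return atomic64_read(&counter->count);
#else
	u32 lo = atomic_read(&counter->count32[0]);
	u32 hi = atomic_read(&counter->count32[1]);

	return ((u64)hi << 32) | lo;
#endif
}
#endif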

/**
 * struct perf_counter_context - counter context structure
 *
 * Used as a container for both task counters and CPU counters:
 */
struct perf_counter_context {
#ifdef CONFIG_PERF_COUNTERS
	/*
	 * Protect the list of counters:
	 */
	spinlock_t		lock;

	struct list_head	counter_list;
	int			nr_counters;
	int			nr_active;
	struct task_struct	*task;
#endif
};
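
/*
 * A minimal sketch of walking a context's counters; assumes callers take
 * ctx->lock (or use the rculist variants under RCU) around the list walk.
 */
#if 0
static void perf_walk_context(struct perf_counter_context *ctx)
{
	struct perf_counter *counter;
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		/* e.g. read or reschedule each counter/group leader */
	}
	spin_unlock_irqrestore(&ctx->lock, flags);
}
#endif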

/**
 * struct perf_cpu_context - per-CPU counter context structure
 */
struct perf_cpu_context {
	struct perf_counter_context	ctx;
	struct perf_counter_context	*task_ctx;
	int				active_oncpu;
	int				max_pertask;
};

/*
 * Set by architecture code:
 */
extern int perf_max_counters;

#ifdef CONFIG_PERF_COUNTERS
extern const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter);

extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
extern void perf_counter_task_sched_out(struct task_struct *task, int cpu);
extern void perf_counter_task_tick(struct task_struct *task, int cpu);
extern void perf_counter_init_task(struct task_struct *task);
extern void perf_counter_notify(struct pt_regs *regs);
extern void perf_counter_print_debug(void);
extern u64 hw_perf_save_disable(void);
extern void hw_perf_restore(u64 ctrl);
extern void atomic64_counter_set(struct perf_counter *counter, u64 val64);
extern u64 atomic64_counter_read(struct perf_counter *counter);
extern int perf_counter_task_disable(void);
extern int perf_counter_task_enable(void);
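
/*
 * A sketch of the usage pattern the save/restore pair above suggests:
 * quiesce all counters around an update of counter scheduling state, then
 * restore the previous state. The function below is only a placeholder.
 */
#if 0
static void example_update_counters(void)
{
	u64 perf_flags = hw_perf_save_disable();

	/* ... modify counter state while counters are quiesced ... */

	hw_perf_restore(perf_flags);
}
#endif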

#else
static inline void
perf_counter_task_sched_in(struct task_struct *task, int cpu)		{ }
static inline void
perf_counter_task_sched_out(struct task_struct *task, int cpu)		{ }
static inline void
perf_counter_task_tick(struct task_struct *task, int cpu)		{ }
static inline void perf_counter_init_task(struct task_struct *task)	{ }
static inline void perf_counter_notify(struct pt_regs *regs)		{ }
static inline void perf_counter_print_debug(void)			{ }
static inline void hw_perf_restore(u64 ctrl)			{ }
static inline u64 hw_perf_save_disable(void)			{ return 0; }
static inline int perf_counter_task_disable(void)	{ return -EINVAL; }
static inline int perf_counter_task_enable(void)	{ return -EINVAL; }
#endif

#endif /* _LINUX_PERF_COUNTER_H */