/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
	PERF_TYPE_HARDWARE			= 0,
	PERF_TYPE_SOFTWARE			= 1,
	PERF_TYPE_TRACEPOINT			= 2,
	PERF_TYPE_HW_CACHE			= 3,
	PERF_TYPE_RAW				= 4,
	PERF_TYPE_BREAKPOINT			= 5,

	PERF_TYPE_MAX,				/* non-ABI */
};

/*
 * Generalized performance event event_id types, used by the
 * attr.config parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_HW_CPU_CYCLES		= 0,
	PERF_COUNT_HW_INSTRUCTIONS		= 1,
	PERF_COUNT_HW_CACHE_REFERENCES		= 2,
	PERF_COUNT_HW_CACHE_MISSES		= 3,
	PERF_COUNT_HW_BRANCH_INSTRUCTIONS	= 4,
	PERF_COUNT_HW_BRANCH_MISSES		= 5,
	PERF_COUNT_HW_BUS_CYCLES		= 6,

	PERF_COUNT_HW_MAX,			/* non-ABI */
};

/*
 * Generalized hardware cache events:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id {
	PERF_COUNT_HW_CACHE_L1D			= 0,
	PERF_COUNT_HW_CACHE_L1I			= 1,
	PERF_COUNT_HW_CACHE_LL			= 2,
	PERF_COUNT_HW_CACHE_DTLB		= 3,
	PERF_COUNT_HW_CACHE_ITLB		= 4,
	PERF_COUNT_HW_CACHE_BPU			= 5,

	PERF_COUNT_HW_CACHE_MAX,		/* non-ABI */
};

enum perf_hw_cache_op_id {
	PERF_COUNT_HW_CACHE_OP_READ		= 0,
	PERF_COUNT_HW_CACHE_OP_WRITE		= 1,
	PERF_COUNT_HW_CACHE_OP_PREFETCH		= 2,

	PERF_COUNT_HW_CACHE_OP_MAX,		/* non-ABI */
};

enum perf_hw_cache_op_result_id {
	PERF_COUNT_HW_CACHE_RESULT_ACCESS	= 0,
	PERF_COUNT_HW_CACHE_RESULT_MISS		= 1,

	PERF_COUNT_HW_CACHE_RESULT_MAX,		/* non-ABI */
};
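
/*
 * The three enums above combine into attr.config for PERF_TYPE_HW_CACHE
 * events: cache id in bits 0-7, op id in bits 8-15, result id in bits
 * 16-23. An illustrative sketch (not itself part of this header):
 *
 *	attr.type   = PERF_TYPE_HW_CACHE;
 *	attr.config = PERF_COUNT_HW_CACHE_L1D |
 *		      (PERF_COUNT_HW_CACHE_OP_READ     <<  8) |
 *		      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 *
 * would count L1 data-cache read misses.
 */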

/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events measure various
 * physical and software events of the kernel (and allow the profiling
 * of them as well):
 */
enum perf_sw_ids {
	PERF_COUNT_SW_CPU_CLOCK			= 0,
	PERF_COUNT_SW_TASK_CLOCK		= 1,
	PERF_COUNT_SW_PAGE_FAULTS		= 2,
	PERF_COUNT_SW_CONTEXT_SWITCHES		= 3,
	PERF_COUNT_SW_CPU_MIGRATIONS		= 4,
	PERF_COUNT_SW_PAGE_FAULTS_MIN		= 5,
	PERF_COUNT_SW_PAGE_FAULTS_MAJ		= 6,
	PERF_COUNT_SW_ALIGNMENT_FAULTS		= 7,
	PERF_COUNT_SW_EMULATION_FAULTS		= 8,

	PERF_COUNT_SW_MAX,			/* non-ABI */
};

/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
	PERF_SAMPLE_IP				= 1U << 0,
	PERF_SAMPLE_TID				= 1U << 1,
	PERF_SAMPLE_TIME			= 1U << 2,
	PERF_SAMPLE_ADDR			= 1U << 3,
	PERF_SAMPLE_READ			= 1U << 4,
	PERF_SAMPLE_CALLCHAIN			= 1U << 5,
	PERF_SAMPLE_ID				= 1U << 6,
	PERF_SAMPLE_CPU				= 1U << 7,
	PERF_SAMPLE_PERIOD			= 1U << 8,
	PERF_SAMPLE_STREAM_ID			= 1U << 9,
	PERF_SAMPLE_RAW				= 1U << 10,

	PERF_SAMPLE_MAX = 1U << 11,		/* non-ABI */
};

/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *	{ u64		value;
 *	  { u64		time_enabled; } && PERF_FORMAT_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_RUNNING
 *	  { u64		id;           } && PERF_FORMAT_ID
 *	} && !PERF_FORMAT_GROUP
 *
 *	{ u64		nr;
 *	  { u64		time_enabled; } && PERF_FORMAT_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_RUNNING
 *	  { u64		value;
 *	    { u64	id;           } && PERF_FORMAT_ID
 *	  }		cntr[nr];
 *	} && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
	PERF_FORMAT_TOTAL_TIME_ENABLED		= 1U << 0,
	PERF_FORMAT_TOTAL_TIME_RUNNING		= 1U << 1,
	PERF_FORMAT_ID				= 1U << 2,
	PERF_FORMAT_GROUP			= 1U << 3,

	PERF_FORMAT_MAX = 1U << 4,		/* non-ABI */
};
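
/*
 * Example (a sketch, not part of the ABI): with both time fields
 * requested, user-space can scale counts from an event that was
 * multiplexed off the PMU for part of the time:
 *
 *	struct { __u64 value, enabled, running; } rf;
 *	__u64 scaled;
 *
 *	read(fd, &rf, sizeof(rf));
 *	if (rf.running)
 *		scaled = rf.value * rf.enabled / rf.running;
 */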

#define PERF_ATTR_SIZE_VER0	64	/* sizeof first published struct */

/*
 * Hardware event_id to monitor via a performance monitoring event:
 */
struct perf_event_attr {

	/*
	 * Major type: hardware/software/tracepoint/etc.
	 */
	__u32			type;

	/*
	 * Size of the attr structure, for fwd/bwd compat.
	 */
	__u32			size;

	/*
	 * Type specific configuration information.
	 */
	__u64			config;

	union {
		__u64		sample_period;
		__u64		sample_freq;
	};

	__u64			sample_type;
	__u64			read_format;

	__u64			disabled       :  1, /* off by default        */
				inherit	       :  1, /* children inherit it   */
				pinned	       :  1, /* must always be on PMU */
				exclusive      :  1, /* only group on PMU     */
				exclude_user   :  1, /* don't count user      */
				exclude_kernel :  1, /* ditto kernel          */
				exclude_hv     :  1, /* ditto hypervisor      */
				exclude_idle   :  1, /* don't count when idle */
				mmap           :  1, /* include mmap data     */
				comm	       :  1, /* include comm data     */
				freq           :  1, /* use freq, not period  */
				inherit_stat   :  1, /* per task counts       */
				enable_on_exec :  1, /* next exec enables     */
				task           :  1, /* trace fork/exit       */
				watermark      :  1, /* wakeup_watermark      */

				__reserved_1   : 49;

	union {
		__u32		wakeup_events;	  /* wakeup every n events */
		__u32		wakeup_watermark; /* bytes before wakeup   */
	};

	struct { /* Hardware breakpoint info */
		__u64		bp_addr;
		__u32		bp_type;
		__u32		bp_len;
		__u64		__bp_reserved_1;
		__u64		__bp_reserved_2;
	};

	__u32			__reserved_2;

	__u64			__reserved_3;
};
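
/*
 * Minimal user-space usage sketch. perf_event_open() here stands for a
 * wrapper around the raw syscall (glibc provides none); error handling
 * is omitted:
 *
 *	struct perf_event_attr attr;
 *	int fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.size     = sizeof(attr);
 *	attr.type     = PERF_TYPE_HARDWARE;
 *	attr.config   = PERF_COUNT_HW_INSTRUCTIONS;
 *	attr.disabled = 1;
 *
 *	fd = perf_event_open(&attr, 0, -1, -1, 0);
 *
 * with arguments: attr, pid (0 = current task), cpu (-1 = any),
 * group_fd (-1 = no group) and flags.
 */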

/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE		_IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE		_IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH		_IO ('$', 2)
#define PERF_EVENT_IOC_RESET		_IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD		_IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT	_IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER	_IOW('$', 6, char *)

enum perf_event_ioc_flags {
	PERF_IOC_FLAG_GROUP		= 1U << 0,
};
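
/*
 * Typical sequence around a measured region (sketch; fd was created
 * with attr.disabled = 1). PERF_IOC_FLAG_GROUP as the ioctl argument
 * applies the operation to the whole event group rather than a single
 * event:
 *
 *	ioctl(fd, PERF_EVENT_IOC_RESET,   PERF_IOC_FLAG_GROUP);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE,  PERF_IOC_FLAG_GROUP);
 *	run_workload();
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
 */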

/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
	__u32	version;		/* version number of this structure */
	__u32	compat_version;		/* lowest version this is compat with */

	/*
	 * Bits needed to read the hw events in user-space.
	 *
	 *   u32 seq;
	 *   s64 count;
	 *
	 *   do {
	 *     seq = pc->lock;
	 *
	 *     barrier()
	 *     if (pc->index) {
	 *       count = pmc_read(pc->index - 1);
	 *       count += pc->offset;
	 *     } else
	 *       goto regular_read;
	 *
	 *     barrier();
	 *   } while (pc->lock != seq);
	 *
	 * NOTE: for obvious reasons this only works on self-monitoring
	 *       processes.
	 */
	__u32	lock;			/* seqlock for synchronization */
	__u32	index;			/* hardware event identifier */
	__s64	offset;			/* add to hardware event value */
	__u64	time_enabled;		/* time event active */
	__u64	time_running;		/* time event on cpu */

		/*
		 * Hole for extension of the self monitor capabilities
		 */

	__u64	__reserved[123];	/* align to 1k */

	/*
	 * Control data for the mmap() data buffer.
	 *
	 * User-space reading the @data_head value should issue an rmb(), on
	 * SMP capable platforms, after reading this value -- see
	 * perf_event_wakeup().
	 *
	 * When the mapping is PROT_WRITE the @data_tail value should be
	 * written by userspace to reflect the last read data. In this case
	 * the kernel will not overwrite unread data.
	 */
	__u64	data_head;		/* head in the data section */
	__u64	data_tail;		/* user-space written tail */
};
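
/*
 * A fleshed-out version of the seqlock read loop documented above
 * (sketch only: pmc_read() stands for the architecture-specific counter
 * read, e.g. RDPMC on x86, and this works only for self-monitoring):
 *
 *	u64 self_read(volatile struct perf_event_mmap_page *pc, int fd)
 *	{
 *		u32 seq;
 *		u64 count;
 *
 *		do {
 *			seq = pc->lock;
 *			barrier();
 *			if (!pc->index) {
 *				read(fd, &count, sizeof(count));
 *				return count;
 *			}
 *			count = pmc_read(pc->index - 1) + pc->offset;
 *			barrier();
 *		} while (pc->lock != seq);
 *
 *		return count;
 *	}
 */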

#define PERF_RECORD_MISC_CPUMODE_MASK		(3 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
#define PERF_RECORD_MISC_KERNEL			(1 << 0)
#define PERF_RECORD_MISC_USER			(2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)

struct perf_event_header {
	__u32	type;
	__u16	misc;
	__u16	size;
};

enum perf_event_type {

	/*
	 * The MMAP events record the PROT_EXEC mappings so that we can
	 * correlate userspace IPs to code. They have the following structure:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	u64				addr;
	 *	u64				len;
	 *	u64				pgoff;
	 *	char				filename[];
	 * };
	 */
	PERF_RECORD_MMAP			= 1,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				id;
	 *	u64				lost;
	 * };
	 */
	PERF_RECORD_LOST			= 2,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	char				comm[];
	 * };
	 */
	PERF_RECORD_COMM			= 3,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 * };
	 */
	PERF_RECORD_EXIT			= 4,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				time;
	 *	u64				id;
	 *	u64				stream_id;
	 * };
	 */
	PERF_RECORD_THROTTLE			= 5,
	PERF_RECORD_UNTHROTTLE			= 6,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 * };
	 */
	PERF_RECORD_FORK			= 7,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, tid;
	 *
	 *	struct read_format		values;
	 * };
	 */
	PERF_RECORD_READ			= 8,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	{ u64			ip;	  } && PERF_SAMPLE_IP
	 *	{ u32			pid, tid; } && PERF_SAMPLE_TID
	 *	{ u64			time;     } && PERF_SAMPLE_TIME
	 *	{ u64			addr;     } && PERF_SAMPLE_ADDR
	 *	{ u64			id;	  } && PERF_SAMPLE_ID
	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
	 *	{ u64			period;   } && PERF_SAMPLE_PERIOD
	 *
	 *	{ struct read_format	values;	  } && PERF_SAMPLE_READ
	 *
	 *	{ u64			nr,
	 *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
	 *
	 *	#
	 *	# The RAW record below is opaque data wrt the ABI
	 *	#
	 *	# That is, the ABI doesn't make any promises wrt
	 *	# the stability of its content, it may vary depending
	 *	# on event, hardware, kernel version and phase of
	 *	# the moon.
	 *	#
	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
	 *	#
	 *
	 *	{ u32			size;
	 *	  char			data[size]; } && PERF_SAMPLE_RAW
	 * };
	 */
	PERF_RECORD_SAMPLE			= 9,

	PERF_RECORD_MAX,			/* non-ABI */
};
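
/*
 * Records are consumed from the mmap()ed data area using the
 * data_head/data_tail protocol described in perf_event_mmap_page
 * (sketch; assumes a PROT_WRITE mapping so data_tail is honoured, a
 * power-of-two data_size, and ignores records that wrap at the end of
 * the buffer):
 *
 *	u64 head = pc->data_head;
 *	u64 tail = pc->data_tail;
 *
 *	rmb();
 *	while (tail != head) {
 *		struct perf_event_header *hdr;
 *
 *		hdr = base + (tail & (data_size - 1));
 *		switch (hdr->type) {
 *		case PERF_RECORD_SAMPLE: ...; break;
 *		case PERF_RECORD_MMAP:   ...; break;
 *		}
 *		tail += hdr->size;
 *	}
 *	pc->data_tail = tail;
 */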

enum perf_callchain_context {
	PERF_CONTEXT_HV			= (__u64)-32,
	PERF_CONTEXT_KERNEL		= (__u64)-128,
	PERF_CONTEXT_USER		= (__u64)-512,

	PERF_CONTEXT_GUEST		= (__u64)-2048,
	PERF_CONTEXT_GUEST_KERNEL	= (__u64)-2176,
	PERF_CONTEXT_GUEST_USER		= (__u64)-2560,

	PERF_CONTEXT_MAX		= (__u64)-4095,
};
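
/*
 * These sentinel values are interleaved with real addresses in a
 * PERF_SAMPLE_CALLCHAIN ips[] array; an entry equal to, e.g.,
 * PERF_CONTEXT_USER marks the start of the user-space frames that
 * follow it.
 */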

#define PERF_FLAG_FD_NO_GROUP	(1U << 0)
#define PERF_FLAG_FD_OUTPUT	(1U << 1)

#ifdef __KERNEL__
/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>

#define PERF_MAX_STACK_DEPTH		255

struct perf_callchain_entry {
	__u64				nr;
	__u64				ip[PERF_MAX_STACK_DEPTH];
};

struct perf_raw_record {
	u32				size;
	void				*data;
};

struct task_struct;

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
	union {
		struct { /* hardware */
			u64		config;
			unsigned long	config_base;
			unsigned long	event_base;
			int		idx;
		};
		struct { /* software */
			s64		remaining;
			struct hrtimer	hrtimer;
		};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		union { /* breakpoint */
			struct arch_hw_breakpoint	info;
		};
#endif
	};
	atomic64_t			prev_count;
	u64				sample_period;
	u64				last_period;
	atomic64_t			period_left;
	u64				interrupts;

	u64				freq_count;
	u64				freq_interrupts;
	u64				freq_stamp;
#endif
};

struct perf_event;

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	int (*enable)			(struct perf_event *event);
	void (*disable)			(struct perf_event *event);
	void (*read)			(struct perf_event *event);
	void (*unthrottle)		(struct perf_event *event);
};

/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
	PERF_EVENT_STATE_ERROR		= -2,
	PERF_EVENT_STATE_OFF		= -1,
	PERF_EVENT_STATE_INACTIVE	=  0,
	PERF_EVENT_STATE_ACTIVE		=  1,
};

struct file;

struct perf_mmap_data {
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
#endif
	int				data_order;
	int				nr_pages;	/* nr of data pages  */
	int				writable;	/* are we writable   */
	int				nr_locked;	/* nr pages mlocked  */

	atomic_t			poll;		/* POLL_ for wakeups */
	atomic_t			events;		/* event_id limit    */

	atomic_long_t			head;		/* write position    */
	atomic_long_t			done_head;	/* completed head    */

	atomic_t			lock;		/* concurrent writes */
	atomic_t			wakeup;		/* needs a wakeup    */
	atomic_t			lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};

struct perf_pending_entry {
	struct perf_pending_entry *next;
	void (*func)(struct perf_pending_entry *);
};

typedef void (*perf_callback_t)(struct perf_event *, void *);

struct perf_sample_data;

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
	struct list_head		group_entry;
	struct list_head		event_entry;
	struct list_head		sibling_list;
	int				nr_siblings;
	struct perf_event		*group_leader;
	struct perf_event		*output;
	const struct pmu		*pmu;

	enum perf_event_active_state	state;
	atomic64_t			count;

	/*
	 * These are the total time in nanoseconds that the event
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task event)
	 * and running (scheduled onto the CPU), respectively.
	 *
	 * They are computed from tstamp_enabled, tstamp_running and
	 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
	 */
	u64				total_time_enabled;
	u64				total_time_running;

	/*
	 * These are timestamps used for computing total_time_enabled
	 * and total_time_running when the event is in INACTIVE or
	 * ACTIVE state, measured in nanoseconds from an arbitrary point
	 * in time.
	 * tstamp_enabled: the notional time when the event was enabled
	 * tstamp_running: the notional time when the event was scheduled on
	 * tstamp_stopped: in INACTIVE state, the notional time when the
	 *	event was scheduled off.
	 */
	u64				tstamp_enabled;
	u64				tstamp_running;
	u64				tstamp_stopped;

	struct perf_event_attr		attr;
	struct hw_perf_event		hw;

	struct perf_event_context	*ctx;
	struct file			*filp;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * events have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			child_mutex;
	struct list_head		child_list;
	struct perf_event		*parent;

	int				oncpu;
	int				cpu;

	struct list_head		owner_entry;
	struct task_struct		*owner;

	/* mmap bits */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;
	struct perf_mmap_data		*data;

	/* poll related */
	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	int				pending_wakeup;
	int				pending_kill;
	int				pending_disable;
	struct perf_pending_entry	pending;

	atomic_t			event_limit;

	void (*destroy)(struct perf_event *);
	struct rcu_head			rcu_head;

	struct pid_namespace		*ns;
	u64				id;

	void (*overflow_handler)(struct perf_event *event,
			int nmi, struct perf_sample_data *data,
			struct pt_regs *regs);

#ifdef CONFIG_EVENT_PROFILE
	struct event_filter		*filter;
#endif

	perf_callback_t			callback;

#endif /* CONFIG_PERF_EVENTS */
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
	/*
	 * Protect the states of the events in the list,
	 * nr_active, and the list:
	 */
	spinlock_t			lock;
	/*
	 * Protect the list of events.  Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex			mutex;

	struct list_head		group_list;
	struct list_head		event_list;
	int				nr_events;
	int				nr_active;
	int				is_active;
	int				nr_stat;
	atomic_t			refcount;
	struct task_struct		*task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64				time;
	u64				timestamp;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_event_context	*parent_ctx;
	u64				parent_gen;
	u64				generation;
	int				pin_count;
	struct rcu_head			rcu_head;
};

/**
 * struct perf_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
	struct perf_event_context	ctx;
	struct perf_event_context	*task_ctx;
	int				active_oncpu;
	int				max_pertask;
	int				exclusive;

	/*
	 * Recursion avoidance:
	 *
	 * task, softirq, irq, nmi context
	 */
	int				recursion[4];
};

struct perf_output_handle {
	struct perf_event		*event;
	struct perf_mmap_data		*data;
	unsigned long			head;
	unsigned long			offset;
	int				nmi;
	int				sample;
	int				locked;
};

#ifdef CONFIG_PERF_EVENTS

/*
 * Set by architecture code:
 */
extern int perf_max_events;

extern const struct pmu *hw_perf_event_init(struct perf_event *event);

extern void perf_event_task_sched_in(struct task_struct *task, int cpu);
extern void perf_event_task_sched_out(struct task_struct *task,
					struct task_struct *next, int cpu);
extern void perf_event_task_tick(struct task_struct *task, int cpu);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void set_perf_event_pending(void);
extern void perf_event_do_pending(void);
extern void perf_event_print_debug(void);
extern void __perf_disable(void);
extern bool __perf_enable(void);
extern void perf_disable(void);
extern void perf_enable(void);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int hw_perf_group_sched_in(struct perf_event *group_leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx, int cpu);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
				int cpu,
				pid_t pid,
				perf_callback_t callback);
extern u64 perf_event_read_value(struct perf_event *event,
				 u64 *enabled, u64 *running);

struct perf_sample_data {
	u64				type;

	u64				ip;
	struct {
		u32	pid;
		u32	tid;
	}				tid_entry;
	u64				time;
	u64				addr;
	u64				id;
	u64				stream_id;
	struct {
		u32	cpu;
		u32	reserved;
	}				cpu_entry;
	u64				period;
	struct perf_callchain_entry	*callchain;
	struct perf_raw_record		*raw;
};

extern void perf_output_sample(struct perf_output_handle *handle,
			       struct perf_event_header *header,
			       struct perf_sample_data *data,
			       struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event,
				struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event, int nmi,
				 struct perf_sample_data *data,
				 struct pt_regs *regs);

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
	return (event->attr.type != PERF_TYPE_RAW) &&
		(event->attr.type != PERF_TYPE_HARDWARE) &&
		(event->attr.type != PERF_TYPE_HW_CACHE);
}

extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);

static inline void
perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
{
	if (atomic_read(&perf_swevent_enabled[event_id]))
		__perf_sw_event(event_id, nr, nmi, regs, addr);
}
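
/*
 * Example call site (sketch): an architecture page-fault path can
 * count faults with
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 *
 * which stays cheap when no such event exists thanks to the
 * perf_swevent_enabled[] check above.
 */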

extern void __perf_event_mmap(struct vm_area_struct *vma);

static inline void perf_event_mmap(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_EXEC)
		__perf_event_mmap(vma);
}

extern void perf_event_comm(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);

extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;

extern void perf_event_init(void);
extern void perf_tp_event(int event_id, u64 addr, u64 count,
				 void *record, int entry_size);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
#define perf_misc_flags(regs)	(user_mode(regs) ? PERF_RECORD_MISC_USER : \
				 PERF_RECORD_MISC_KERNEL)
#define perf_instruction_pointer(regs)	instruction_pointer(regs)
#endif

extern int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_event *event, unsigned int size,
			     int nmi, int sample);
extern void perf_output_end(struct perf_output_handle *handle);
extern void perf_output_copy(struct perf_output_handle *handle,
			     const void *buf, unsigned int len);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
#else
static inline void
perf_event_task_sched_in(struct task_struct *task, int cpu)		{ }
static inline void
perf_event_task_sched_out(struct task_struct *task,
			    struct task_struct *next, int cpu)		{ }
static inline void
perf_event_task_tick(struct task_struct *task, int cpu)			{ }
static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
static inline void perf_event_exit_task(struct task_struct *child)	{ }
static inline void perf_event_free_task(struct task_struct *task)	{ }
static inline void perf_event_do_pending(void)				{ }
static inline void perf_event_print_debug(void)				{ }
static inline void perf_disable(void)					{ }
static inline void perf_enable(void)					{ }
static inline int perf_event_task_disable(void)				{ return -EINVAL; }
static inline int perf_event_task_enable(void)				{ return -EINVAL; }

static inline void
perf_sw_event(u32 event_id, u64 nr, int nmi,
		     struct pt_regs *regs, u64 addr)			{ }
static inline void
perf_bp_event(struct perf_event *event, void *data)			{ }

static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
static inline void perf_event_comm(struct task_struct *tsk)		{ }
static inline void perf_event_fork(struct task_struct *tsk)		{ }
static inline void perf_event_init(void)				{ }
static inline int  perf_swevent_get_recursion_context(void)		{ return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)		{ }

#endif

#define perf_output_put(handle, x) \
	perf_output_copy((handle), &(x), sizeof(x))
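
/*
 * Output path sketch (kernel side): reserve space in the buffer, emit
 * fields, then commit. perf_output_begin() returns non-zero on failure,
 * e.g. when there is no space left:
 *
 *	struct perf_output_handle handle;
 *	u64 val;
 *
 *	if (perf_output_begin(&handle, event, sizeof(val), nmi, 0))
 *		return;
 *	perf_output_put(&handle, val);
 *	perf_output_end(&handle);
 */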

#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_EVENT_H */