/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
	PERF_TYPE_HARDWARE			= 0,
	PERF_TYPE_SOFTWARE			= 1,
	PERF_TYPE_TRACEPOINT			= 2,
	PERF_TYPE_HW_CACHE			= 3,
	PERF_TYPE_RAW				= 4,
	PERF_TYPE_BREAKPOINT			= 5,

	PERF_TYPE_MAX,				/* non-ABI */
};

/*
 * Generalized performance event event_id types, used by the
 * attr.config parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_HW_CPU_CYCLES		= 0,
	PERF_COUNT_HW_INSTRUCTIONS		= 1,
	PERF_COUNT_HW_CACHE_REFERENCES		= 2,
	PERF_COUNT_HW_CACHE_MISSES		= 3,
	PERF_COUNT_HW_BRANCH_INSTRUCTIONS	= 4,
	PERF_COUNT_HW_BRANCH_MISSES		= 5,
	PERF_COUNT_HW_BUS_CYCLES		= 6,

	PERF_COUNT_HW_MAX,			/* non-ABI */
};

/*
 * Generalized hardware cache events:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id {
	PERF_COUNT_HW_CACHE_L1D			= 0,
	PERF_COUNT_HW_CACHE_L1I			= 1,
	PERF_COUNT_HW_CACHE_LL			= 2,
	PERF_COUNT_HW_CACHE_DTLB		= 3,
	PERF_COUNT_HW_CACHE_ITLB		= 4,
	PERF_COUNT_HW_CACHE_BPU			= 5,

	PERF_COUNT_HW_CACHE_MAX,		/* non-ABI */
};

enum perf_hw_cache_op_id {
	PERF_COUNT_HW_CACHE_OP_READ		= 0,
	PERF_COUNT_HW_CACHE_OP_WRITE		= 1,
	PERF_COUNT_HW_CACHE_OP_PREFETCH		= 2,

	PERF_COUNT_HW_CACHE_OP_MAX,		/* non-ABI */
};

enum perf_hw_cache_op_result_id {
	PERF_COUNT_HW_CACHE_RESULT_ACCESS	= 0,
	PERF_COUNT_HW_CACHE_RESULT_MISS		= 1,

	PERF_COUNT_HW_CACHE_RESULT_MAX,		/* non-ABI */
};
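
/*
 * Illustrative encoding (a sketch of how the kernel decodes attr.config
 * for PERF_TYPE_HW_CACHE events): the three cache enums above combine
 * into a single attr.config value, one byte each:
 *
 *	attr.config = (perf_hw_cache_id) |
 *		      (perf_hw_cache_op_id << 8) |
 *		      (perf_hw_cache_op_result_id << 16);
 *
 * e.g. L1-D read misses:
 *
 *	attr.type   = PERF_TYPE_HW_CACHE;
 *	attr.config = PERF_COUNT_HW_CACHE_L1D |
 *		      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *		      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 */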

/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events measure various
 * physical and software events of the kernel (and allow the profiling
 * of them as well):
 */
enum perf_sw_ids {
	PERF_COUNT_SW_CPU_CLOCK			= 0,
	PERF_COUNT_SW_TASK_CLOCK		= 1,
	PERF_COUNT_SW_PAGE_FAULTS		= 2,
	PERF_COUNT_SW_CONTEXT_SWITCHES		= 3,
	PERF_COUNT_SW_CPU_MIGRATIONS		= 4,
	PERF_COUNT_SW_PAGE_FAULTS_MIN		= 5,
	PERF_COUNT_SW_PAGE_FAULTS_MAJ		= 6,
	PERF_COUNT_SW_ALIGNMENT_FAULTS		= 7,
	PERF_COUNT_SW_EMULATION_FAULTS		= 8,

	PERF_COUNT_SW_MAX,			/* non-ABI */
};

/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
	PERF_SAMPLE_IP				= 1U << 0,
	PERF_SAMPLE_TID				= 1U << 1,
	PERF_SAMPLE_TIME			= 1U << 2,
	PERF_SAMPLE_ADDR			= 1U << 3,
	PERF_SAMPLE_READ			= 1U << 4,
	PERF_SAMPLE_CALLCHAIN			= 1U << 5,
	PERF_SAMPLE_ID				= 1U << 6,
	PERF_SAMPLE_CPU				= 1U << 7,
	PERF_SAMPLE_PERIOD			= 1U << 8,
	PERF_SAMPLE_STREAM_ID			= 1U << 9,
	PERF_SAMPLE_RAW				= 1U << 10,

	PERF_SAMPLE_MAX = 1U << 11,		/* non-ABI */
};

/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *	{ u64		value;
 *	  { u64		time_enabled; } && PERF_FORMAT_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_RUNNING
 *	  { u64		id;           } && PERF_FORMAT_ID
 *	} && !PERF_FORMAT_GROUP
 *
 *	{ u64		nr;
 *	  { u64		time_enabled; } && PERF_FORMAT_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_RUNNING
 *	  { u64		value;
 *	    { u64	id;           } && PERF_FORMAT_ID
 *	  }		cntr[nr];
 *	} && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
	PERF_FORMAT_TOTAL_TIME_ENABLED		= 1U << 0,
	PERF_FORMAT_TOTAL_TIME_RUNNING		= 1U << 1,
	PERF_FORMAT_ID				= 1U << 2,
	PERF_FORMAT_GROUP			= 1U << 3,

	PERF_FORMAT_MAX = 1U << 4,		/* non-ABI */
};
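
/*
 * Illustrative user-space read for a non-group event (a sketch, not
 * part of the ABI text above), with PERF_FORMAT_TOTAL_TIME_ENABLED and
 * PERF_FORMAT_TOTAL_TIME_RUNNING set; the enabled/running ratio scales
 * the raw value when the event was time-multiplexed with others:
 *
 *	struct { __u64 value, time_enabled, time_running; } rf;
 *
 *	if (read(fd, &rf, sizeof(rf)) == sizeof(rf) && rf.time_running)
 *		scaled = rf.value * rf.time_enabled / rf.time_running;
 */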

#define PERF_ATTR_SIZE_VER0	64	/* sizeof first published struct */

/*
 * Hardware event_id to monitor via a performance monitoring event:
 */
struct perf_event_attr {

	/*
	 * Major type: hardware/software/tracepoint/etc.
	 */
	__u32			type;

	/*
	 * Size of the attr structure, for fwd/bwd compat.
	 */
	__u32			size;

	/*
	 * Type specific configuration information.
	 */
	__u64			config;

	union {
		__u64		sample_period;
		__u64		sample_freq;
	};

	__u64			sample_type;
	__u64			read_format;

	__u64			disabled       :  1, /* off by default        */
				inherit	       :  1, /* children inherit it   */
				pinned	       :  1, /* must always be on PMU */
				exclusive      :  1, /* only group on PMU     */
				exclude_user   :  1, /* don't count user      */
				exclude_kernel :  1, /* ditto kernel          */
				exclude_hv     :  1, /* ditto hypervisor      */
				exclude_idle   :  1, /* don't count when idle */
				mmap           :  1, /* include mmap data     */
				comm	       :  1, /* include comm data     */
				freq           :  1, /* use freq, not period  */
				inherit_stat   :  1, /* per task counts       */
				enable_on_exec :  1, /* next exec enables     */
				task           :  1, /* trace fork/exit       */
				watermark      :  1, /* wakeup_watermark      */
				/*
				 * precise_ip:
				 *
				 *  0 - SAMPLE_IP can have arbitrary skid
				 *  1 - SAMPLE_IP must have constant skid
				 *  2 - SAMPLE_IP requested to have 0 skid
				 *  3 - SAMPLE_IP must have 0 skid
				 *
				 *  See also PERF_RECORD_MISC_EXACT_IP
				 */
				precise_ip     :  2, /* skid constraint       */
				mmap_data      :  1, /* non-exec mmap data    */

				__reserved_1   : 46;

	union {
		__u32		wakeup_events;	  /* wakeup every n events */
		__u32		wakeup_watermark; /* bytes before wakeup   */
	};

	__u32			bp_type;
	__u64			bp_addr;
	__u64			bp_len;
};
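
/*
 * Illustrative user-space setup (a sketch; sys_perf_event_open() here
 * stands for a syscall(__NR_perf_event_open, ...) wrapper, which libc
 * does not provide): count user-space instructions of the calling task
 * on any CPU.
 *
 *	struct perf_event_attr attr;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.type           = PERF_TYPE_HARDWARE;
 *	attr.size           = sizeof(attr);
 *	attr.config         = PERF_COUNT_HW_INSTRUCTIONS;
 *	attr.disabled       = 1;
 *	attr.exclude_kernel = 1;
 *	attr.exclude_hv     = 1;
 *
 *	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
 */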

/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE		_IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE		_IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH		_IO ('$', 2)
#define PERF_EVENT_IOC_RESET		_IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD		_IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT	_IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER	_IOW('$', 6, char *)

enum perf_event_ioc_flags {
	PERF_IOC_FLAG_GROUP		= 1U << 0,
};
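
/*
 * Illustrative usage (a sketch): reset and enable a disabled event,
 * run the workload, then stop counting and read the value. Passing
 * PERF_IOC_FLAG_GROUP instead of 0 applies the operation to the whole
 * event group.
 *
 *	ioctl(fd, PERF_EVENT_IOC_RESET,   0);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE,  0);
 *	workload();
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *	read(fd, &count, sizeof(count));
 */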

/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
	__u32	version;		/* version number of this structure */
	__u32	compat_version;		/* lowest version this is compat with */

	/*
	 * Bits needed to read the hw events in user-space.
	 *
	 *   u32 seq;
	 *   s64 count;
	 *
	 *   do {
	 *     seq = pc->lock;
	 *
	 *     barrier()
	 *     if (pc->index) {
	 *       count = pmc_read(pc->index - 1);
	 *       count += pc->offset;
	 *     } else
	 *       goto regular_read;
	 *
	 *     barrier();
	 *   } while (pc->lock != seq);
	 *
	 * NOTE: for obvious reasons this only works on self-monitoring
	 *       processes.
	 */
	__u32	lock;			/* seqlock for synchronization */
	__u32	index;			/* hardware event identifier */
	__s64	offset;			/* add to hardware event value */
	__u64	time_enabled;		/* time event active */
	__u64	time_running;		/* time event on cpu */

	/*
	 * Hole for extension of the self monitor capabilities
	 */

	__u64	__reserved[123];	/* align to 1k */

	/*
	 * Control data for the mmap() data buffer.
	 *
	 * User-space reading the @data_head value should issue an rmb(), on
	 * SMP capable platforms, after reading this value -- see
	 * perf_event_wakeup().
	 *
	 * When the mapping is PROT_WRITE the @data_tail value should be
	 * written by userspace to reflect the last read data. In this case
	 * the kernel will not over-write unread data.
	 */
	__u64   data_head;		/* head in the data section */
	__u64	data_tail;		/* user-space written tail */
};
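
/*
 * Illustrative ring-buffer consumption (a sketch, assuming a PROT_WRITE
 * mapping of 1 + 2^n pages, so data_size is a power of two and the
 * masked offset below stays in bounds; a real consumer must also handle
 * records that wrap around the end of the buffer). Writing data_tail
 * back tells the kernel the consumed space may be reused:
 *
 *	struct perf_event_mmap_page *pc = base;
 *	char *data = (char *)base + page_size;
 *	__u64 head = pc->data_head;
 *
 *	rmb();
 *	while (tail < head) {
 *		struct perf_event_header *hdr =
 *			(void *)(data + (tail & (data_size - 1)));
 *		consume(hdr);
 *		tail += hdr->size;
 *	}
 *	pc->data_tail = tail;
 */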

#define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
#define PERF_RECORD_MISC_KERNEL			(1 << 0)
#define PERF_RECORD_MISC_USER			(2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL		(4 << 0)
#define PERF_RECORD_MISC_GUEST_USER		(5 << 0)

/*
 * Indicates that the content of PERF_SAMPLE_IP points to
 * the actual instruction that triggered the event. See also
 * perf_event_attr::precise_ip.
 */
#define PERF_RECORD_MISC_EXACT_IP		(1 << 14)
/*
 * Reserve the last bit to indicate some extended misc field
 */
#define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15)

struct perf_event_header {
	__u32	type;
	__u16	misc;
	__u16	size;
};

enum perf_event_type {

	/*
	 * The MMAP events record the PROT_EXEC mappings so that we can
	 * correlate userspace IPs to code. They have the following structure:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	u64				addr;
	 *	u64				len;
	 *	u64				pgoff;
	 *	char				filename[];
	 * };
	 */
	PERF_RECORD_MMAP			= 1,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				id;
	 *	u64				lost;
	 * };
	 */
	PERF_RECORD_LOST			= 2,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	char				comm[];
	 * };
	 */
	PERF_RECORD_COMM			= 3,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 * };
	 */
	PERF_RECORD_EXIT			= 4,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				time;
	 *	u64				id;
	 *	u64				stream_id;
	 * };
	 */
	PERF_RECORD_THROTTLE			= 5,
	PERF_RECORD_UNTHROTTLE			= 6,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 * };
	 */
	PERF_RECORD_FORK			= 7,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, tid;
	 *
	 *	struct read_format		values;
	 * };
	 */
	PERF_RECORD_READ			= 8,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	{ u64			ip;	  } && PERF_SAMPLE_IP
	 *	{ u32			pid, tid; } && PERF_SAMPLE_TID
	 *	{ u64			time;     } && PERF_SAMPLE_TIME
	 *	{ u64			addr;     } && PERF_SAMPLE_ADDR
	 *	{ u64			id;	  } && PERF_SAMPLE_ID
	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
	 *	{ u64			period;   } && PERF_SAMPLE_PERIOD
	 *
	 *	{ struct read_format	values;	  } && PERF_SAMPLE_READ
	 *
	 *	{ u64			nr,
	 *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
	 *
	 *	#
	 *	# The RAW record below is opaque data wrt the ABI
	 *	#
	 *	# That is, the ABI doesn't make any promises wrt the
	 *	# stability of its content, it may vary depending
	 *	# on event, hardware, kernel version and phase of
	 *	# the moon.
	 *	#
	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
	 *	#
	 *
	 *	{ u32			size;
	 *	  char			data[size]; } && PERF_SAMPLE_RAW
	 * };
	 */
	PERF_RECORD_SAMPLE			= 9,

	PERF_RECORD_MAX,			/* non-ABI */
};

enum perf_callchain_context {
	PERF_CONTEXT_HV			= (__u64)-32,
	PERF_CONTEXT_KERNEL		= (__u64)-128,
	PERF_CONTEXT_USER		= (__u64)-512,

	PERF_CONTEXT_GUEST		= (__u64)-2048,
	PERF_CONTEXT_GUEST_KERNEL	= (__u64)-2176,
	PERF_CONTEXT_GUEST_USER		= (__u64)-2560,

	PERF_CONTEXT_MAX		= (__u64)-4095,
};

#define PERF_FLAG_FD_NO_GROUP	(1U << 0)
#define PERF_FLAG_FD_OUTPUT	(1U << 1)

#ifdef __KERNEL__
/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

struct perf_guest_info_callbacks {
	int (*is_in_guest) (void);
	int (*is_user_mode) (void);
	unsigned long (*get_guest_ip) (void);
};

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <asm/atomic.h>
#include <asm/local.h>

#define PERF_MAX_STACK_DEPTH		255

struct perf_callchain_entry {
	__u64				nr;
	__u64				ip[PERF_MAX_STACK_DEPTH];
};

struct perf_raw_record {
	u32				size;
	void				*data;
};

struct perf_branch_entry {
	__u64				from;
	__u64				to;
	__u64				flags;
};

struct perf_branch_stack {
	__u64				nr;
	struct perf_branch_entry	entries[0];
};

struct task_struct;

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
	union {
		struct { /* hardware */
			u64		config;
			u64		last_tag;
			unsigned long	config_base;
			unsigned long	event_base;
			int		idx;
			int		last_cpu;
		};
		struct { /* software */
			s64		remaining;
			struct hrtimer	hrtimer;
		};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		struct { /* breakpoint */
			struct arch_hw_breakpoint	info;
			struct list_head		bp_list;
		};
#endif
	};
	local64_t			prev_count;
	u64				sample_period;
	u64				last_period;
	local64_t			period_left;
	u64				interrupts;

	u64				freq_time_stamp;
	u64				freq_count_stamp;
#endif
};

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_EVENT_TXN 0x1

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	int (*enable)			(struct perf_event *event);
	void (*disable)			(struct perf_event *event);
	int (*start)			(struct perf_event *event);
	void (*stop)			(struct perf_event *event);
	void (*read)			(struct perf_event *event);
	void (*unthrottle)		(struct perf_event *event);

	/*
	 * Group events scheduling is treated as a transaction: add group
	 * events as a whole and perform one schedulability test. If the
	 * test fails, roll back the whole group.
	 */

	/*
	 * Start the transaction, after this ->enable() doesn't need
	 * to do schedulability tests.
	 */
	void (*start_txn)	(struct pmu *pmu);
	/*
	 * If ->start_txn() disabled the ->enable() schedulability test
	 * then ->commit_txn() is required to perform one. On success
	 * the transaction is closed. On error the transaction is kept
	 * open until ->cancel_txn() is called.
	 */
	int  (*commit_txn)	(struct pmu *pmu);
	/*
	 * Will cancel the transaction, assumes ->disable() is called for
	 * each successful ->enable() during the transaction.
	 */
	void (*cancel_txn)	(struct pmu *pmu);
};
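
/*
 * Illustrative use of the transaction interface (a sketch, not the
 * actual group-scheduling code; event_sched_in() stands in for the
 * core's per-event ->enable() path): schedule a whole group with a
 * single schedulability test.
 *
 *	pmu->start_txn(pmu);
 *
 *	if (event_sched_in(group_leader))
 *		goto fail;
 *	list_for_each_entry(event, &group_leader->sibling_list, group_entry) {
 *		if (event_sched_in(event))
 *			goto fail;
 *	}
 *	if (!pmu->commit_txn(pmu))
 *		return 0;		(whole group is now scheduled)
 * fail:
 *	pmu->cancel_txn(pmu);		(rolls back each successful ->enable())
 *	return -EAGAIN;
 */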

/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
	PERF_EVENT_STATE_ERROR		= -2,
	PERF_EVENT_STATE_OFF		= -1,
	PERF_EVENT_STATE_INACTIVE	=  0,
	PERF_EVENT_STATE_ACTIVE		=  1,
};

struct file;

#define PERF_BUFFER_WRITABLE		0x01

struct perf_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				writable;	/* are we writable   */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	local_t				nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};

struct perf_pending_entry {
	struct perf_pending_entry *next;
	void (*func)(struct perf_pending_entry *);
};

struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
					struct perf_sample_data *,
					struct pt_regs *regs);

enum perf_group_flag {
	PERF_GROUP_SOFTWARE = 0x1,
};

#define SWEVENT_HLIST_BITS	8
#define SWEVENT_HLIST_SIZE	(1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
	struct hlist_head	heads[SWEVENT_HLIST_SIZE];
	struct rcu_head		rcu_head;
};

#define PERF_ATTACH_CONTEXT	0x01
#define PERF_ATTACH_GROUP	0x02

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
	struct list_head		group_entry;
	struct list_head		event_entry;
	struct list_head		sibling_list;
	struct hlist_node		hlist_entry;
	int				nr_siblings;
	int				group_flags;
	struct perf_event		*group_leader;
	struct pmu		*pmu;

	enum perf_event_active_state	state;
	unsigned int			attach_state;
	local64_t			count;
	atomic64_t			child_count;

	/*
	 * These are the total time in nanoseconds that the event
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task event)
	 * and running (scheduled onto the CPU), respectively.
	 *
	 * They are computed from tstamp_enabled, tstamp_running and
	 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
	 */
	u64				total_time_enabled;
	u64				total_time_running;

	/*
	 * These are timestamps used for computing total_time_enabled
	 * and total_time_running when the event is in INACTIVE or
	 * ACTIVE state, measured in nanoseconds from an arbitrary point
	 * in time.
	 * tstamp_enabled: the notional time when the event was enabled
	 * tstamp_running: the notional time when the event was scheduled on
	 * tstamp_stopped: in INACTIVE state, the notional time when the
	 *	event was scheduled off.
	 */
	u64				tstamp_enabled;
	u64				tstamp_running;
	u64				tstamp_stopped;

	struct perf_event_attr		attr;
	struct hw_perf_event		hw;

	struct perf_event_context	*ctx;
	struct file			*filp;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * events have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			child_mutex;
	struct list_head		child_list;
	struct perf_event		*parent;

	int				oncpu;
	int				cpu;

	struct list_head		owner_entry;
	struct task_struct		*owner;

	/* mmap bits */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;
	int				mmap_locked;
	struct user_struct		*mmap_user;
	struct perf_buffer		*buffer;

	/* poll related */
	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	int				pending_wakeup;
	int				pending_kill;
	int				pending_disable;
	struct perf_pending_entry	pending;

	atomic_t			event_limit;

	void (*destroy)(struct perf_event *);
	struct rcu_head			rcu_head;

	struct pid_namespace		*ns;
	u64				id;

	perf_overflow_handler_t		overflow_handler;

#ifdef CONFIG_EVENT_TRACING
	struct ftrace_event_call	*tp_event;
	struct event_filter		*filter;
#endif

#endif /* CONFIG_PERF_EVENTS */
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
	/*
	 * Protect the states of the events in the list,
	 * nr_active, and the list:
	 */
	raw_spinlock_t			lock;
	/*
	 * Protect the list of events.  Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex			mutex;

	struct list_head		pinned_groups;
	struct list_head		flexible_groups;
	struct list_head		event_list;
	int				nr_events;
	int				nr_active;
	int				is_active;
	int				nr_stat;
	atomic_t			refcount;
	struct task_struct		*task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64				time;
	u64				timestamp;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_event_context	*parent_ctx;
	u64				parent_gen;
	u64				generation;
	int				pin_count;
	struct rcu_head			rcu_head;
};

/*
 * Number of contexts where an event can trigger:
 *	task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS	4

/**
 * struct perf_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
	struct perf_event_context	ctx;
	struct perf_event_context	*task_ctx;
	int				active_oncpu;
	int				max_pertask;
	int				exclusive;
	struct swevent_hlist		*swevent_hlist;
	struct mutex			hlist_mutex;
	int				hlist_refcount;

	/* Recursion avoidance in each context */
	int				recursion[PERF_NR_CONTEXTS];
};

struct perf_output_handle {
	struct perf_event		*event;
	struct perf_buffer		*buffer;
	unsigned long			wakeup;
	unsigned long			size;
	void				*addr;
	int				page;
	int				nmi;
	int				sample;
};

#ifdef CONFIG_PERF_EVENTS

/*
 * Set by architecture code:
 */
extern int perf_max_events;

extern struct pmu *hw_perf_event_init(struct perf_event *event);

extern void perf_event_task_sched_in(struct task_struct *task);
extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
extern void perf_event_task_tick(struct task_struct *task);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void set_perf_event_pending(void);
extern void perf_event_do_pending(void);
extern void perf_event_print_debug(void);
extern void __perf_disable(void);
extern bool __perf_enable(void);
extern void perf_disable(void);
extern void perf_enable(void);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
				int cpu,
				pid_t pid,
				perf_overflow_handler_t callback);
extern u64 perf_event_read_value(struct perf_event *event,
				 u64 *enabled, u64 *running);
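
/*
 * Illustrative in-kernel counter creation (a sketch; my_overflow_handler
 * is a caller-supplied perf_overflow_handler_t): sample CPU cycles on
 * the local CPU (pid == -1 means per-CPU rather than per-task).
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.size		= sizeof(attr),
 *		.config		= PERF_COUNT_HW_CPU_CYCLES,
 *		.sample_period	= 1000000,
 *	};
 *	struct perf_event *event;
 *
 *	event = perf_event_create_kernel_counter(&attr, smp_processor_id(),
 *						 -1, my_overflow_handler);
 *	...
 *	perf_event_release_kernel(event);
 */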

struct perf_sample_data {
	u64				type;

	u64				ip;
	struct {
		u32	pid;
		u32	tid;
	}				tid_entry;
	u64				time;
	u64				addr;
	u64				id;
	u64				stream_id;
	struct {
		u32	cpu;
		u32	reserved;
	}				cpu_entry;
	u64				period;
	struct perf_callchain_entry	*callchain;
	struct perf_raw_record		*raw;
};

static inline
void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
{
	data->addr = addr;
	data->raw  = NULL;
}

extern void perf_output_sample(struct perf_output_handle *handle,
			       struct perf_event_header *header,
			       struct perf_sample_data *data,
			       struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event,
				struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event, int nmi,
				 struct perf_sample_data *data,
				 struct pt_regs *regs);

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
	switch (event->attr.type) {
	case PERF_TYPE_SOFTWARE:
	case PERF_TYPE_TRACEPOINT:
	/* for now the breakpoint stuff also works as software event */
	case PERF_TYPE_BREAKPOINT:
		return 1;
	}
	return 0;
}

extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void
perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * Take a snapshot of the regs. Skip ip and frame pointer to
 * the nth caller. We only need a few of the regs:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - bp for callchains
 * - eflags, for future purposes, just in case
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(*regs));

	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static inline void
perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
{
	if (atomic_read(&perf_swevent_enabled[event_id])) {
		struct pt_regs hot_regs;

		if (!regs) {
			perf_fetch_caller_regs(&hot_regs);
			regs = &hot_regs;
		}
		__perf_sw_event(event_id, nr, nmi, regs, addr);
	}
}
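
/*
 * Illustrative call site (a sketch): this is the pattern kernel code
 * uses to emit a software event, e.g. from the page-fault path:
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 *
 * The atomic_read() in perf_sw_event() keeps the disabled case cheap:
 * nothing beyond one counter load happens when no such event exists.
 */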

extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_comm(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry *entry,
				struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry *entry,
				  struct pt_regs *regs);

static inline void
perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;

static inline bool perf_paranoid_tracepoint_raw(void)
{
	return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
	return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
	return sysctl_perf_event_paranoid > 1;
}

extern void perf_event_init(void);
extern void perf_tp_event(u64 addr, u64 count, void *record,
			  int entry_size, struct pt_regs *regs,
			  struct hlist_head *head, int rctx);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
#define perf_misc_flags(regs)	(user_mode(regs) ? PERF_RECORD_MISC_USER : \
				 PERF_RECORD_MISC_KERNEL)
#define perf_instruction_pointer(regs)	instruction_pointer(regs)
#endif

extern int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_event *event, unsigned int size,
			     int nmi, int sample);
extern void perf_output_end(struct perf_output_handle *handle);
extern void perf_output_copy(struct perf_output_handle *handle,
			     const void *buf, unsigned int len);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
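
/*
 * Illustrative record emission (a sketch of how the output helpers
 * compose; perf_output_put() is defined below, error handling
 * abbreviated):
 *
 *	struct perf_output_handle handle;
 *	struct perf_event_header header = {
 *		.type = PERF_RECORD_COMM,
 *		.size = sizeof(header) + datalen,
 *	};
 *
 *	if (perf_output_begin(&handle, event, header.size, nmi, 0))
 *		return;
 *	perf_output_put(&handle, header);
 *	perf_output_copy(&handle, data, datalen);
 *	perf_output_end(&handle);
 */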
#else
static inline void
perf_event_task_sched_in(struct task_struct *task)			{ }
static inline void
perf_event_task_sched_out(struct task_struct *task,
			    struct task_struct *next)			{ }
static inline void
perf_event_task_tick(struct task_struct *task)				{ }
static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
static inline void perf_event_exit_task(struct task_struct *child)	{ }
static inline void perf_event_free_task(struct task_struct *task)	{ }
static inline void perf_event_do_pending(void)				{ }
static inline void perf_event_print_debug(void)				{ }
static inline void perf_disable(void)					{ }
static inline void perf_enable(void)					{ }
static inline int perf_event_task_disable(void)				{ return -EINVAL; }
static inline int perf_event_task_enable(void)				{ return -EINVAL; }

static inline void
perf_sw_event(u32 event_id, u64 nr, int nmi,
		     struct pt_regs *regs, u64 addr)			{ }
static inline void
perf_bp_event(struct perf_event *event, void *data)			{ }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks) { return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks) { return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
static inline void perf_event_comm(struct task_struct *tsk)		{ }
static inline void perf_event_fork(struct task_struct *tsk)		{ }
static inline void perf_event_init(void)				{ }
static inline int  perf_swevent_get_recursion_context(void)		{ return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)		{ }
static inline void perf_event_enable(struct perf_event *event)		{ }
static inline void perf_event_disable(struct perf_event *event)		{ }
#endif

#define perf_output_put(handle, x) \
	perf_output_copy((handle), &(x), sizeof(x))

/*
 * This has to have a higher priority than migration_notifier in sched.c.
 */
#define perf_cpu_notifier(fn)					\
do {								\
	static struct notifier_block fn##_nb __cpuinitdata =	\
		{ .notifier_call = fn, .priority = CPU_PRI_PERF }; \
	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,		\
		(void *)(unsigned long)smp_processor_id());	\
	fn(&fn##_nb, (unsigned long)CPU_STARTING,		\
		(void *)(unsigned long)smp_processor_id());	\
	fn(&fn##_nb, (unsigned long)CPU_ONLINE,			\
		(void *)(unsigned long)smp_processor_id());	\
	register_cpu_notifier(&fn##_nb);			\
} while (0)

#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_EVENT_H */