/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
	PERF_TYPE_HARDWARE			= 0,
	PERF_TYPE_SOFTWARE			= 1,
	PERF_TYPE_TRACEPOINT			= 2,
	PERF_TYPE_HW_CACHE			= 3,
	PERF_TYPE_RAW				= 4,
	PERF_TYPE_BREAKPOINT			= 5,

	PERF_TYPE_MAX,				/* non-ABI */
};

/*
 * Generalized performance event event_id types, used by the
 * attr.config parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_HW_CPU_CYCLES		= 0,
	PERF_COUNT_HW_INSTRUCTIONS		= 1,
	PERF_COUNT_HW_CACHE_REFERENCES		= 2,
	PERF_COUNT_HW_CACHE_MISSES		= 3,
	PERF_COUNT_HW_BRANCH_INSTRUCTIONS	= 4,
	PERF_COUNT_HW_BRANCH_MISSES		= 5,
	PERF_COUNT_HW_BUS_CYCLES		= 6,
	PERF_COUNT_HW_STALLED_CYCLES_FRONTEND	= 7,
	PERF_COUNT_HW_STALLED_CYCLES_BACKEND	= 8,
	PERF_COUNT_HW_REF_CPU_CYCLES		= 9,

	PERF_COUNT_HW_MAX,			/* non-ABI */
};

/*
 * Generalized hardware cache events:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id {
	PERF_COUNT_HW_CACHE_L1D			= 0,
	PERF_COUNT_HW_CACHE_L1I			= 1,
	PERF_COUNT_HW_CACHE_LL			= 2,
	PERF_COUNT_HW_CACHE_DTLB		= 3,
	PERF_COUNT_HW_CACHE_ITLB		= 4,
	PERF_COUNT_HW_CACHE_BPU			= 5,
	PERF_COUNT_HW_CACHE_NODE		= 6,

	PERF_COUNT_HW_CACHE_MAX,		/* non-ABI */
};

enum perf_hw_cache_op_id {
	PERF_COUNT_HW_CACHE_OP_READ		= 0,
	PERF_COUNT_HW_CACHE_OP_WRITE		= 1,
	PERF_COUNT_HW_CACHE_OP_PREFETCH		= 2,

	PERF_COUNT_HW_CACHE_OP_MAX,		/* non-ABI */
};

enum perf_hw_cache_op_result_id {
	PERF_COUNT_HW_CACHE_RESULT_ACCESS	= 0,
	PERF_COUNT_HW_CACHE_RESULT_MISS		= 1,

	PERF_COUNT_HW_CACHE_RESULT_MAX,		/* non-ABI */
};

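/*
 * Illustrative sketch, not part of the ABI definitions above: for
 * PERF_TYPE_HW_CACHE events, attr.config is commonly composed of the
 * three enums above, one byte each:
 *
 *	attr.config = PERF_COUNT_HW_CACHE_L1D |
 *		      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *		      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 *
 * i.e. "L1 data cache read misses".
 */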

/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events measure various
 * physical and software events of the kernel (and allow profiling them
 * as well):
 */
enum perf_sw_ids {
	PERF_COUNT_SW_CPU_CLOCK			= 0,
	PERF_COUNT_SW_TASK_CLOCK		= 1,
	PERF_COUNT_SW_PAGE_FAULTS		= 2,
	PERF_COUNT_SW_CONTEXT_SWITCHES		= 3,
	PERF_COUNT_SW_CPU_MIGRATIONS		= 4,
	PERF_COUNT_SW_PAGE_FAULTS_MIN		= 5,
	PERF_COUNT_SW_PAGE_FAULTS_MAJ		= 6,
	PERF_COUNT_SW_ALIGNMENT_FAULTS		= 7,
	PERF_COUNT_SW_EMULATION_FAULTS		= 8,

	PERF_COUNT_SW_MAX,			/* non-ABI */
};

/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
	PERF_SAMPLE_IP				= 1U << 0,
	PERF_SAMPLE_TID				= 1U << 1,
	PERF_SAMPLE_TIME			= 1U << 2,
	PERF_SAMPLE_ADDR			= 1U << 3,
	PERF_SAMPLE_READ			= 1U << 4,
	PERF_SAMPLE_CALLCHAIN			= 1U << 5,
	PERF_SAMPLE_ID				= 1U << 6,
	PERF_SAMPLE_CPU				= 1U << 7,
	PERF_SAMPLE_PERIOD			= 1U << 8,
	PERF_SAMPLE_STREAM_ID			= 1U << 9,
	PERF_SAMPLE_RAW				= 1U << 10,
	PERF_SAMPLE_BRANCH_STACK		= 1U << 11,

	PERF_SAMPLE_MAX = 1U << 12,		/* non-ABI */
};

/*
 * Values to program into branch_sample_type when PERF_SAMPLE_BRANCH_STACK
 * is set.
 *
 * If the user does not pass priv level information via branch_sample_type,
 * the kernel uses the event's priv level. Branch and event priv levels do
 * not have to match. Branch priv level is checked for permissions.
 *
 * The branch types can be combined, however BRANCH_ANY covers all types
 * of branches and therefore it supersedes all the other types.
 */
enum perf_branch_sample_type {
	PERF_SAMPLE_BRANCH_USER		= 1U << 0, /* user branches */
	PERF_SAMPLE_BRANCH_KERNEL	= 1U << 1, /* kernel branches */
	PERF_SAMPLE_BRANCH_HV		= 1U << 2, /* hypervisor branches */

	PERF_SAMPLE_BRANCH_ANY		= 1U << 3, /* any branch types */
	PERF_SAMPLE_BRANCH_ANY_CALL	= 1U << 4, /* any call branch */
	PERF_SAMPLE_BRANCH_ANY_RETURN	= 1U << 5, /* any return branch */
	PERF_SAMPLE_BRANCH_IND_CALL	= 1U << 6, /* indirect calls */

	PERF_SAMPLE_BRANCH_MAX		= 1U << 7, /* non-ABI */
};

#define PERF_SAMPLE_BRANCH_PLM_ALL \
	(PERF_SAMPLE_BRANCH_USER|\
	 PERF_SAMPLE_BRANCH_KERNEL|\
	 PERF_SAMPLE_BRANCH_HV)

/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *	{ u64		value;
 *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64		id;           } && PERF_FORMAT_ID
 *	} && !PERF_FORMAT_GROUP
 *
 *	{ u64		nr;
 *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64		value;
 *	    { u64	id;           } && PERF_FORMAT_ID
 *	  }		cntr[nr];
 *	} && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
	PERF_FORMAT_TOTAL_TIME_ENABLED		= 1U << 0,
	PERF_FORMAT_TOTAL_TIME_RUNNING		= 1U << 1,
	PERF_FORMAT_ID				= 1U << 2,
	PERF_FORMAT_GROUP			= 1U << 3,

	PERF_FORMAT_MAX = 1U << 4,		/* non-ABI */
};

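/*
 * Illustrative sketch (an assumption, not part of this header): reading
 * a single non-group event that enabled both time fields, and scaling
 * the count to compensate for multiplexing:
 *
 *	struct { __u64 value, time_enabled, time_running; } rf;
 *
 *	if (read(fd, &rf, sizeof(rf)) == sizeof(rf) && rf.time_running)
 *		scaled = rf.value * rf.time_enabled / rf.time_running;
 */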

#define PERF_ATTR_SIZE_VER0	64	/* sizeof first published struct */

/*
 * Hardware event_id to monitor via a performance monitoring event:
 */
struct perf_event_attr {

	/*
	 * Major type: hardware/software/tracepoint/etc.
	 */
	__u32			type;

	/*
	 * Size of the attr structure, for fwd/bwd compat.
	 */
	__u32			size;

	/*
	 * Type specific configuration information.
	 */
	__u64			config;

	union {
		__u64		sample_period;
		__u64		sample_freq;
	};

	__u64			sample_type;
	__u64			read_format;

	__u64			disabled       :  1, /* off by default        */
				inherit	       :  1, /* children inherit it   */
				pinned	       :  1, /* must always be on PMU */
				exclusive      :  1, /* only group on PMU     */
				exclude_user   :  1, /* don't count user      */
				exclude_kernel :  1, /* ditto kernel          */
				exclude_hv     :  1, /* ditto hypervisor      */
				exclude_idle   :  1, /* don't count when idle */
				mmap           :  1, /* include mmap data     */
				comm	       :  1, /* include comm data     */
				freq           :  1, /* use freq, not period  */
				inherit_stat   :  1, /* per task counts       */
				enable_on_exec :  1, /* next exec enables     */
				task           :  1, /* trace fork/exit       */
				watermark      :  1, /* wakeup_watermark      */
				/*
				 * precise_ip:
				 *
				 *  0 - SAMPLE_IP can have arbitrary skid
				 *  1 - SAMPLE_IP must have constant skid
				 *  2 - SAMPLE_IP requested to have 0 skid
				 *  3 - SAMPLE_IP must have 0 skid
				 *
				 *  See also PERF_RECORD_MISC_EXACT_IP
				 */
				precise_ip     :  2, /* skid constraint       */
				mmap_data      :  1, /* non-exec mmap data    */
				sample_id_all  :  1, /* sample_type all events */

				exclude_host   :  1, /* don't count in host   */
				exclude_guest  :  1, /* don't count in guest  */

				__reserved_1   : 43;

	union {
		__u32		wakeup_events;	  /* wakeup every n events */
		__u32		wakeup_watermark; /* bytes before wakeup   */
	};

	__u32			bp_type;
	union {
		__u64		bp_addr;
		__u64		config1; /* extension of config */
	};
	union {
		__u64		bp_len;
		__u64		config2; /* extension of config1 */
	};
	__u64	branch_sample_type; /* enum branch_sample_type */
};

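/*
 * Illustrative sketch (an assumption, not part of this header): minimal
 * user-space setup counting instructions of the calling thread:
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.size		= sizeof(attr),
 *		.config		= PERF_COUNT_HW_INSTRUCTIONS,
 *		.disabled	= 1,
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 */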

/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE		_IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE		_IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH		_IO ('$', 2)
#define PERF_EVENT_IOC_RESET		_IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD		_IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT	_IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER	_IOW('$', 6, char *)

enum perf_event_ioc_flags {
	PERF_IOC_FLAG_GROUP		= 1U << 0,
};

/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
	__u32	version;		/* version number of this structure */
	__u32	compat_version;		/* lowest version this is compat with */

	/*
	 * Bits needed to read the hw events in user-space.
	 *
	 *   u32 seq;
	 *   s64 count;
	 *
	 *   do {
	 *     seq = pc->lock;
	 *
	 *     barrier()
	 *     if (pc->index) {
	 *       count = pmc_read(pc->index - 1);
	 *       count += pc->offset;
	 *     } else
	 *       goto regular_read;
	 *
	 *     barrier();
	 *   } while (pc->lock != seq);
	 *
	 * NOTE: for obvious reasons this only works on self-monitoring
	 *       processes.
	 */
	__u32	lock;			/* seqlock for synchronization */
	__u32	index;			/* hardware event identifier */
	__s64	offset;			/* add to hardware event value */
	__u64	time_enabled;		/* time event active */
	__u64	time_running;		/* time event on cpu */
	__u32	time_mult, time_shift;
	__u64	time_offset;

		/*
		 * Hole for extension of the self monitor capabilities
		 */

	__u64	__reserved[121];	/* align to 1k */

	/*
	 * Control data for the mmap() data buffer.
	 *
	 * User-space reading the @data_head value should issue an rmb(), on
	 * SMP capable platforms, after reading this value -- see
	 * perf_event_wakeup().
	 *
	 * When the mapping is PROT_WRITE the @data_tail value should be
	 * written by userspace to reflect the last read data. In this case
	 * the kernel will not over-write unread data.
	 */
	__u64   data_head;		/* head in the data section */
	__u64	data_tail;		/* user-space written tail */
};

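/*
 * Illustrative sketch (an assumption): a user-space reader consuming the
 * mmap()ed data area described above, for a PROT_WRITE mapping:
 *
 *	__u64 head = pg->data_head;
 *	rmb();
 *	while (pg->data_tail != head) {
 *		... consume records between data_tail and head ...
 *	}
 *	pg->data_tail = head;
 */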

#define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
#define PERF_RECORD_MISC_KERNEL			(1 << 0)
#define PERF_RECORD_MISC_USER			(2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL		(4 << 0)
#define PERF_RECORD_MISC_GUEST_USER		(5 << 0)

/*
 * Indicates that the content of PERF_SAMPLE_IP points to
 * the actual instruction that triggered the event. See also
 * perf_event_attr::precise_ip.
 */
#define PERF_RECORD_MISC_EXACT_IP		(1 << 14)
/*
 * Reserve the last bit to indicate some extended misc field
 */
#define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15)

struct perf_event_header {
	__u32	type;
	__u16	misc;
	__u16	size;
};

enum perf_event_type {

	/*
	 * If perf_event_attr.sample_id_all is set then all event types will
	 * have the sample_type selected fields related to where/when
	 * (identity) an event took place (TID, TIME, ID, CPU, STREAM_ID),
	 * described in PERF_RECORD_SAMPLE below. They will be stashed just
	 * after the perf_event_header and the fields already present for
	 * the existing fields, i.e. at the end of the payload. That way a
	 * newer perf.data file will be supported by older perf tools, with
	 * these new optional fields being ignored.
	 *
	 * The MMAP events record the PROT_EXEC mappings so that we can
	 * correlate userspace IPs to code. They have the following structure:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	u64				addr;
	 *	u64				len;
	 *	u64				pgoff;
	 *	char				filename[];
	 * };
	 */
	PERF_RECORD_MMAP			= 1,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				id;
	 *	u64				lost;
	 * };
	 */
	PERF_RECORD_LOST			= 2,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	char				comm[];
	 * };
	 */
	PERF_RECORD_COMM			= 3,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 * };
	 */
	PERF_RECORD_EXIT			= 4,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				time;
	 *	u64				id;
	 *	u64				stream_id;
	 * };
	 */
	PERF_RECORD_THROTTLE			= 5,
	PERF_RECORD_UNTHROTTLE			= 6,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 * };
	 */
	PERF_RECORD_FORK			= 7,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, tid;
	 *
	 *	struct read_format		values;
	 * };
	 */
	PERF_RECORD_READ			= 8,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	{ u64			ip;	  } && PERF_SAMPLE_IP
	 *	{ u32			pid, tid; } && PERF_SAMPLE_TID
	 *	{ u64			time;     } && PERF_SAMPLE_TIME
	 *	{ u64			addr;     } && PERF_SAMPLE_ADDR
	 *	{ u64			id;	  } && PERF_SAMPLE_ID
	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
	 *	{ u64			period;   } && PERF_SAMPLE_PERIOD
	 *
	 *	{ struct read_format	values;	  } && PERF_SAMPLE_READ
	 *
	 *	{ u64			nr,
	 *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
	 *
	 *	#
	 *	# The RAW record below is opaque data wrt the ABI
	 *	#
	 *	# That is, the ABI doesn't make any promises wrt to
	 *	# the stability of its content, it may vary depending
	 *	# on event, hardware, kernel version and phase of
	 *	# the moon.
	 *	#
	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
	 *	#
	 *
	 *	{ u32			size;
	 *	  char			data[size]; } && PERF_SAMPLE_RAW
	 *
	 *	{ u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
	 * };
	 */
	PERF_RECORD_SAMPLE			= 9,

	PERF_RECORD_MAX,			/* non-ABI */
};

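/*
 * Illustrative sketch (an assumption): every record in the data area
 * starts with a struct perf_event_header; header.size is the distance
 * to the next record, so a reader can also skip types it does not know:
 *
 *	struct perf_event_header *hdr = ...;
 *
 *	switch (hdr->type) {
 *	case PERF_RECORD_SAMPLE: ... break;
 *	case PERF_RECORD_MMAP:   ... break;
 *	default:                 break;	   # skip unknown types
 *	}
 *	hdr = (void *)hdr + hdr->size;
 */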

enum perf_callchain_context {
	PERF_CONTEXT_HV			= (__u64)-32,
	PERF_CONTEXT_KERNEL		= (__u64)-128,
	PERF_CONTEXT_USER		= (__u64)-512,

	PERF_CONTEXT_GUEST		= (__u64)-2048,
	PERF_CONTEXT_GUEST_KERNEL	= (__u64)-2176,
	PERF_CONTEXT_GUEST_USER		= (__u64)-2560,

	PERF_CONTEXT_MAX		= (__u64)-4095,
};

#define PERF_FLAG_FD_NO_GROUP		(1U << 0)
#define PERF_FLAG_FD_OUTPUT		(1U << 1)
#define PERF_FLAG_PID_CGROUP		(1U << 2) /* pid=cgroup id, per-cpu mode only */

#ifdef __KERNEL__
/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <linux/cgroup.h>
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

struct perf_guest_info_callbacks {
	int				(*is_in_guest)(void);
	int				(*is_user_mode)(void);
	unsigned long			(*get_guest_ip)(void);
};

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/static_key.h>
#include <linux/atomic.h>
#include <asm/local.h>

#define PERF_MAX_STACK_DEPTH		255

struct perf_callchain_entry {
	__u64				nr;
	__u64				ip[PERF_MAX_STACK_DEPTH];
};

struct perf_raw_record {
	u32				size;
	void				*data;
};

/*
 * single taken branch record layout:
 *
 *      from: source instruction (may not always be a branch insn)
 *        to: branch target
 *   mispred: branch target was mispredicted
 * predicted: branch target was predicted
 *
 * Support for mispred, predicted is optional. If it is not supported,
 * mispred = predicted = 0.
 */
struct perf_branch_entry {
	__u64	from;
	__u64	to;
	__u64	mispred:1,  /* target mispredicted */
		predicted:1,/* target predicted */
		reserved:62;
};

/*
 * branch stack layout:
 *  nr: number of taken branches stored in entries[]
 *
 * Note that nr can vary from sample to sample.
 * Branches (from, to) are stored from most recent
 * to least recent, i.e., entries[0] contains the most
 * recent branch.
 */
struct perf_branch_stack {
	__u64				nr;
	struct perf_branch_entry	entries[0];
};

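/*
 * Illustrative sketch (an assumption): walking a sampled branch stack,
 * most recent branch first:
 *
 *	int i;
 *	for (i = 0; i < bs->nr; i++)
 *		pr_debug("%llx -> %llx\n",
 *			 bs->entries[i].from, bs->entries[i].to);
 */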

struct task_struct;

/*
 * extra PMU register associated with an event
 */
struct hw_perf_event_extra {
	u64		config;	/* register value */
	unsigned int	reg;	/* register address or index */
	int		alloc;	/* extra register already allocated */
	int		idx;	/* index in shared_regs->regs[] */
};

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
	union {
		struct { /* hardware */
			u64		config;
			u64		last_tag;
			unsigned long	config_base;
			unsigned long	event_base;
			int		idx;
			int		last_cpu;

			struct hw_perf_event_extra extra_reg;
			struct hw_perf_event_extra branch_reg;
		};
		struct { /* software */
			struct hrtimer	hrtimer;
		};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		struct { /* breakpoint */
			struct arch_hw_breakpoint	info;
			struct list_head		bp_list;
			/*
			 * Crufty hack to avoid the chicken and egg
			 * problem hw_breakpoint has with context
			 * creation and event initialization.
			 */
			struct task_struct		*bp_target;
		};
#endif
	};
	int				state;
	local64_t			prev_count;
	u64				sample_period;
	u64				last_period;
	local64_t			period_left;
	u64                             interrupts_seq;
	u64				interrupts;

	u64				freq_time_stamp;
	u64				freq_count_stamp;
#endif
};

/*
 * hw_perf_event::state flags
 */
#define PERF_HES_STOPPED	0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
#define PERF_HES_ARCH		0x04

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_EVENT_TXN 0x1

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	struct list_head		entry;

	struct device			*dev;
	const struct attribute_group	**attr_groups;
	char				*name;
	int				type;

	int * __percpu			pmu_disable_count;
	struct perf_cpu_context * __percpu pmu_cpu_context;
	int				task_ctx_nr;

	/*
	 * Fully disable/enable this PMU, can be used to protect from the PMI
	 * as well as for lazy/batch writing of the MSRs.
	 */
	void (*pmu_enable)		(struct pmu *pmu); /* optional */
	void (*pmu_disable)		(struct pmu *pmu); /* optional */

	/*
	 * Try and initialize the event for this PMU.
	 * Should return -ENOENT when the @event doesn't match this PMU.
	 */
	int (*event_init)		(struct perf_event *event);

#define PERF_EF_START	0x01		/* start the counter when adding    */
#define PERF_EF_RELOAD	0x02		/* reload the counter when starting */
#define PERF_EF_UPDATE	0x04		/* update the counter when stopping */

	/*
	 * Adds/Removes a counter to/from the PMU, can be done inside
	 * a transaction, see the ->*_txn() methods.
	 */
	int  (*add)			(struct perf_event *event, int flags);
	void (*del)			(struct perf_event *event, int flags);

	/*
	 * Starts/Stops a counter present on the PMU. The PMI handler
	 * should stop the counter when perf_event_overflow() returns
	 * !0. ->start() will be used to continue.
	 */
	void (*start)			(struct perf_event *event, int flags);
	void (*stop)			(struct perf_event *event, int flags);

	/*
	 * Updates the counter value of the event.
	 */
	void (*read)			(struct perf_event *event);

	/*
	 * Group events scheduling is treated as a transaction: add
	 * group events as a whole and perform one schedulability test.
	 * If the test fails, roll back the whole group.
	 *
	 * Start the transaction, after this ->add() doesn't need to
	 * do schedulability tests.
	 */
	void (*start_txn)		(struct pmu *pmu); /* optional */
	/*
	 * If ->start_txn() disabled the ->add() schedulability test
	 * then ->commit_txn() is required to perform one. On success
	 * the transaction is closed. On error the transaction is kept
	 * open until ->cancel_txn() is called.
	 */
	int  (*commit_txn)		(struct pmu *pmu); /* optional */
	/*
	 * Will cancel the transaction, assumes ->del() is called
	 * for each successful ->add() during the transaction.
	 */
	void (*cancel_txn)		(struct pmu *pmu); /* optional */

	/*
	 * Will return the value for perf_event_mmap_page::index for this event,
	 * if no implementation is provided it will default to: event->hw.idx + 1.
	 */
	int (*event_idx)		(struct perf_event *event); /* optional */
};

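/*
 * Illustrative sketch (an assumption): how the core typically drives the
 * transaction interface above when scheduling a group:
 *
 *	pmu->start_txn(pmu);
 *	... pmu->add(event, PERF_EF_START) for each group member ...
 *	if (!pmu->commit_txn(pmu))
 *		return 0;	   # group is scheduled as a whole
 *	pmu->cancel_txn(pmu);	   # undoes the successful ->add()s
 */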

/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
	PERF_EVENT_STATE_ERROR		= -2,
	PERF_EVENT_STATE_OFF		= -1,
	PERF_EVENT_STATE_INACTIVE	=  0,
	PERF_EVENT_STATE_ACTIVE		=  1,
};

struct file;
struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *,
					struct perf_sample_data *,
					struct pt_regs *regs);

enum perf_group_flag {
	PERF_GROUP_SOFTWARE		= 0x1,
};

#define SWEVENT_HLIST_BITS		8
#define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
	struct hlist_head		heads[SWEVENT_HLIST_SIZE];
	struct rcu_head			rcu_head;
};

#define PERF_ATTACH_CONTEXT	0x01
#define PERF_ATTACH_GROUP	0x02
#define PERF_ATTACH_TASK	0x04

#ifdef CONFIG_CGROUP_PERF
/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */
struct perf_cgroup_info {
	u64				time;
	u64				timestamp;
};

struct perf_cgroup {
	struct				cgroup_subsys_state css;
	struct				perf_cgroup_info *info;	/* timing info, one per cpu */
};
#endif

struct ring_buffer;

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
	struct list_head		group_entry;
	struct list_head		event_entry;
	struct list_head		sibling_list;
	struct hlist_node		hlist_entry;
	int				nr_siblings;
	int				group_flags;
	struct perf_event		*group_leader;
	struct pmu			*pmu;

	enum perf_event_active_state	state;
	unsigned int			attach_state;
	local64_t			count;
	atomic64_t			child_count;

	/*
	 * These are the total time in nanoseconds that the event
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task event)
	 * and running (scheduled onto the CPU), respectively.
	 *
	 * They are computed from tstamp_enabled, tstamp_running and
	 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
	 */
	u64				total_time_enabled;
	u64				total_time_running;

	/*
	 * These are timestamps used for computing total_time_enabled
	 * and total_time_running when the event is in INACTIVE or
	 * ACTIVE state, measured in nanoseconds from an arbitrary point
	 * in time.
	 * tstamp_enabled: the notional time when the event was enabled
	 * tstamp_running: the notional time when the event was scheduled on
	 * tstamp_stopped: in INACTIVE state, the notional time when the
	 *	event was scheduled off.
	 */
	u64				tstamp_enabled;
	u64				tstamp_running;
	u64				tstamp_stopped;

	/*
	 * timestamp shadows the actual context timing but it can
	 * be safely used in NMI interrupt context. It reflects the
	 * context time as it was when the event was last scheduled in.
	 *
	 * ctx_time already accounts for ctx->timestamp. Therefore to
	 * compute ctx_time for a sample, simply add perf_clock().
	 */
	u64				shadow_ctx_time;

	struct perf_event_attr		attr;
	u16				header_size;
	u16				id_header_size;
	u16				read_size;
	struct hw_perf_event		hw;

	struct perf_event_context	*ctx;
	struct file			*filp;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * events have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			child_mutex;
	struct list_head		child_list;
	struct perf_event		*parent;

	int				oncpu;
	int				cpu;

	struct list_head		owner_entry;
	struct task_struct		*owner;

	/* mmap bits */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;
	int				mmap_locked;
	struct user_struct		*mmap_user;
	struct ring_buffer		*rb;
	struct list_head		rb_entry;

	/* poll related */
	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	int				pending_wakeup;
	int				pending_kill;
	int				pending_disable;
	struct irq_work			pending;

	atomic_t			event_limit;

	void (*destroy)(struct perf_event *);
	struct rcu_head			rcu_head;

	struct pid_namespace		*ns;
	u64				id;

	perf_overflow_handler_t		overflow_handler;
	void				*overflow_handler_context;

#ifdef CONFIG_EVENT_TRACING
	struct ftrace_event_call	*tp_event;
	struct event_filter		*filter;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops               ftrace_ops;
#endif
#endif

#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup		*cgrp; /* cgroup the event is attached to */
	int				cgrp_defer_enabled;
#endif

#endif /* CONFIG_PERF_EVENTS */
};

enum perf_event_context_type {
	task_context,
	cpu_context,
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
	struct pmu			*pmu;
	enum perf_event_context_type	type;
	/*
	 * Protect the states of the events in the list,
	 * nr_active, and the list:
	 */
	raw_spinlock_t			lock;
	/*
	 * Protect the list of events.  Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex			mutex;

	struct list_head		pinned_groups;
	struct list_head		flexible_groups;
	struct list_head		event_list;
	int				nr_events;
	int				nr_active;
	int				is_active;
	int				nr_stat;
	int				nr_freq;
	int				rotate_disable;
	atomic_t			refcount;
	struct task_struct		*task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64				time;
	u64				timestamp;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_event_context	*parent_ctx;
	u64				parent_gen;
	u64				generation;
	int				pin_count;
	int				nr_cgroups; /* cgroup events present */
	struct rcu_head			rcu_head;
};

/*
 * Number of contexts where an event can trigger:
 *	task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS	4

/**
 * struct perf_cpu_context - per CPU event context structure
 */
struct perf_cpu_context {
	struct perf_event_context	ctx;
	struct perf_event_context	*task_ctx;
	int				active_oncpu;
	int				exclusive;
	struct list_head		rotation_list;
	int				jiffies_interval;
	struct pmu			*active_pmu;
	struct perf_cgroup		*cgrp;
};

struct perf_output_handle {
	struct perf_event		*event;
	struct ring_buffer		*rb;
	unsigned long			wakeup;
	unsigned long			size;
	void				*addr;
	int				page;
};

#ifdef CONFIG_PERF_EVENTS

extern int perf_pmu_register(struct pmu *pmu, char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *prev,
				       struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
					struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
				int cpu,
				struct task_struct *task,
				perf_overflow_handler_t callback,
				void *context);
extern u64 perf_event_read_value(struct perf_event *event,
				 u64 *enabled, u64 *running);

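/*
 * Illustrative sketch (an assumption, not kernel code): an in-kernel
 * cycle counter on the current task, read and then released:
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.size	= sizeof(attr),
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *	};
 *	struct perf_event *event;
 *	u64 enabled, running;
 *
 *	event = perf_event_create_kernel_counter(&attr, -1, current,
 *						 NULL, NULL);
 *	...
 *	perf_event_read_value(event, &enabled, &running);
 *	perf_event_release_kernel(event);
 */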

struct perf_sample_data {
	u64				type;

	u64				ip;
	struct {
		u32	pid;
		u32	tid;
	}				tid_entry;
	u64				time;
	u64				addr;
	u64				id;
	u64				stream_id;
	struct {
		u32	cpu;
		u32	reserved;
	}				cpu_entry;
	u64				period;
	struct perf_callchain_entry	*callchain;
	struct perf_raw_record		*raw;
	struct perf_branch_stack	*br_stack;
};

static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
{
	data->addr = addr;
	data->raw  = NULL;
	data->br_stack = NULL;
}

extern void perf_output_sample(struct perf_output_handle *handle,
			       struct perf_event_header *header,
			       struct perf_sample_data *data,
			       struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event,
				struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event,
				 struct perf_sample_data *data,
				 struct pt_regs *regs);

static inline bool is_sampling_event(struct perf_event *event)
{
	return event->attr.sample_period != 0;
}

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
	return event->pmu->task_ctx_nr == perf_sw_context;
}

extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * Take a snapshot of the regs. Skip ip and frame pointer to
 * the nth caller. We only need a few of the regs:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - bp for callchains
 * - eflags, for future purposes, just in case
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(*regs));

	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
	struct pt_regs hot_regs;

	if (static_key_false(&perf_swevent_enabled[event_id])) {
		if (!regs) {
			perf_fetch_caller_regs(&hot_regs);
			regs = &hot_regs;
		}
		__perf_sw_event(event_id, nr, regs, addr);
	}
}

extern struct static_key_deferred perf_sched_events;

static inline void perf_event_task_sched_in(struct task_struct *prev,
					    struct task_struct *task)
{
	if (static_key_false(&perf_sched_events.key))
		__perf_event_task_sched_in(prev, task);
}

static inline void perf_event_task_sched_out(struct task_struct *prev,
					     struct task_struct *next)
{
	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);

	if (static_key_false(&perf_sched_events.key))
		__perf_event_task_sched_out(prev, next);
}

extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_comm(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);

static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;

extern int perf_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);

static inline bool perf_paranoid_tracepoint_raw(void)
{
	return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
	return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
	return sysctl_perf_event_paranoid > 1;
}

extern void perf_event_init(void);
extern void perf_tp_event(u64 addr, u64 count, void *record,
			  int entry_size, struct pt_regs *regs,
			  struct hlist_head *head, int rctx);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
# define perf_misc_flags(regs) \
		(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs)	instruction_pointer(regs)
#endif

static inline bool has_branch_stack(struct perf_event *event)
{
	return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}

extern int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_event *event, unsigned int size);
extern void perf_output_end(struct perf_output_handle *handle);
extern void perf_output_copy(struct perf_output_handle *handle,
			     const void *buf, unsigned int len);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern void perf_event_task_tick(void);
#else
static inline void
perf_event_task_sched_in(struct task_struct *prev,
			 struct task_struct *task)			{ }
static inline void
perf_event_task_sched_out(struct task_struct *prev,
			  struct task_struct *next)			{ }
static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
static inline void perf_event_exit_task(struct task_struct *child)	{ }
static inline void perf_event_free_task(struct task_struct *task)	{ }
static inline void perf_event_delayed_put(struct task_struct *task)	{ }
static inline void perf_event_print_debug(void)				{ }
static inline int perf_event_task_disable(void)				{ return -EINVAL; }
static inline int perf_event_task_enable(void)				{ return -EINVAL; }
static inline int perf_event_refresh(struct perf_event *event, int refresh)
{
	return -EINVAL;
}

static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)	{ }
static inline void
perf_bp_event(struct perf_event *event, void *data)			{ }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
static inline void perf_event_comm(struct task_struct *tsk)		{ }
static inline void perf_event_fork(struct task_struct *tsk)		{ }
static inline void perf_event_init(void)				{ }
static inline int  perf_swevent_get_recursion_context(void)		{ return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)		{ }
static inline void perf_event_enable(struct perf_event *event)		{ }
static inline void perf_event_disable(struct perf_event *event)		{ }
static inline void perf_event_task_tick(void)				{ }
#endif

#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))

/*
 * This has to have a higher priority than migration_notifier in sched.c.
 */
#define perf_cpu_notifier(fn)						\
do {									\
	static struct notifier_block fn##_nb __cpuinitdata =		\
		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,			\
		(void *)(unsigned long)smp_processor_id());		\
	fn(&fn##_nb, (unsigned long)CPU_STARTING,			\
		(void *)(unsigned long)smp_processor_id());		\
	fn(&fn##_nb, (unsigned long)CPU_ONLINE,				\
		(void *)(unsigned long)smp_processor_id());		\
	register_cpu_notifier(&fn##_nb);				\
} while (0)

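/*
 * Illustrative sketch (an assumption; my_pmu_notify is a hypothetical
 * callback): a PMU driver would typically invoke this from its init
 * path, so the callback runs for CPU_UP_PREPARE/CPU_STARTING/CPU_ONLINE
 * on the current CPU and is registered for future hotplug events:
 *
 *	static int __cpuinit my_pmu_notify(struct notifier_block *nb,
 *					   unsigned long action, void *hcpu)
 *	{
 *		...
 *		return NOTIFY_OK;
 *	}
 *
 *	perf_cpu_notifier(my_pmu_notify);
 */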

#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_EVENT_H */