/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licencing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <uapi/linux/perf_event.h>

/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

struct perf_guest_info_callbacks {
	int				(*is_in_guest)(void);
	int				(*is_user_mode)(void);
	unsigned long			(*get_guest_ip)(void);
};
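
/*
 * Illustrative sketch (not part of this header): a hypervisor module can
 * supply these callbacks so that PMU interrupts arriving while a guest is
 * running get attributed to guest context (KVM does something similar).
 * The my_hv_*() helpers are hypothetical; the register/unregister
 * prototypes appear further down in this header.
 *
 *	static struct perf_guest_info_callbacks my_hv_guest_cbs = {
 *		.is_in_guest	= my_hv_is_in_guest,
 *		.is_user_mode	= my_hv_is_user_mode,
 *		.get_guest_ip	= my_hv_get_guest_ip,
 *	};
 *
 *	perf_register_guest_info_callbacks(&my_hv_guest_cbs);
 */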

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
#include <linux/cgroup.h>
#include <asm/local.h>

struct perf_callchain_entry {
	__u64				nr;
	__u64				ip[0]; /* /proc/sys/kernel/perf_event_max_stack */
};

struct perf_callchain_entry_ctx {
	struct perf_callchain_entry *entry;
	u32			    max_stack;
	u32			    nr;
	short			    contexts;
	bool			    contexts_maxed;
};

typedef unsigned long (*perf_copy_f)(void *dst, const void *src,
				     unsigned long off, unsigned long len);

struct perf_raw_frag {
	union {
		struct perf_raw_frag	*next;
		unsigned long		pad;
	};
	perf_copy_f			copy;
	void				*data;
	u32				size;
} __packed;

struct perf_raw_record {
	struct perf_raw_frag		frag;
	u32				size;
};

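/*
 * Illustrative sketch (not part of this header): a raw record is a chain of
 * fragments; frag.size covers one fragment while perf_raw_record::size is
 * the total.  Using the perf_raw_frag_last() helper defined near the end of
 * this header, a consumer walks the chain like so (consume() is a
 * placeholder):
 *
 *	struct perf_raw_frag *frag = &raw->frag;
 *
 *	do {
 *		consume(frag->data, frag->size);
 *		if (perf_raw_frag_last(frag))
 *			break;
 *		frag = frag->next;
 *	} while (1);
 */
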
/*
 * branch stack layout:
 *  nr: number of taken branches stored in entries[]
 *
 * Note that nr can vary from sample to sample.
 * Branches (to, from) are stored from most recent
 * to least recent, i.e., entries[0] contains the most
 * recent branch.
 */
struct perf_branch_stack {
	__u64				nr;
	struct perf_branch_entry	entries[0];
};
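
/*
 * Illustrative sketch (not part of this header): because entries[0] is the
 * most recent branch, printing newest-to-oldest is a simple forward walk:
 *
 *	u64 i;
 *
 *	for (i = 0; i < bs->nr; i++)
 *		pr_debug("branch %llu: %llx -> %llx\n", i,
 *			 bs->entries[i].from, bs->entries[i].to);
 */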

struct task_struct;

/*
 * extra PMU register associated with an event
 */
struct hw_perf_event_extra {
	u64		config;	/* register value */
	unsigned int	reg;	/* register address or index */
	int		alloc;	/* extra register already allocated */
	int		idx;	/* index in shared_regs->regs[] */
};

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
	union {
		struct { /* hardware */
			u64		config;
			u64		last_tag;
			unsigned long	config_base;
			unsigned long	event_base;
			int		event_base_rdpmc;
			int		idx;
			int		last_cpu;
			int		flags;

			struct hw_perf_event_extra extra_reg;
			struct hw_perf_event_extra branch_reg;
		};
		struct { /* software */
			struct hrtimer	hrtimer;
		};
		struct { /* tracepoint */
			/* for tp_event->class */
			struct list_head	tp_list;
		};
		struct { /* intel_cqm */
			int			cqm_state;
			u32			cqm_rmid;
			int			is_group_event;
			struct list_head	cqm_events_entry;
			struct list_head	cqm_groups_entry;
			struct list_head	cqm_group_entry;
		};
		struct { /* itrace */
			int			itrace_started;
		};
		struct { /* amd_power */
			u64	pwr_acc;
			u64	ptsc;
		};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		struct { /* breakpoint */
			/*
			 * Crufty hack to avoid the chicken and egg
			 * problem hw_breakpoint has with context
			 * creation and event initialization.
			 */
			struct arch_hw_breakpoint	info;
			struct list_head		bp_list;
		};
#endif
	};
	/*
	 * If the event is a per task event, this will point to the task in
	 * question. See the comment in perf_event_alloc().
	 */
	struct task_struct		*target;

	/*
	 * The PMU stores the hardware address filter configuration
	 * here.
	 */
	void				*addr_filters;

	/* Last sync'ed generation of filters */
	unsigned long			addr_filters_gen;

/*
 * hw_perf_event::state flags; used to track the PERF_EF_* state.
 */
#define PERF_HES_STOPPED	0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
#define PERF_HES_ARCH		0x04

	int				state;

	/*
	 * The last observed hardware counter value, updated with a
	 * local64_cmpxchg() such that pmu::read() can be called nested.
	 */
	local64_t			prev_count;

	/*
	 * The period to start the next sample with.
	 */
	u64				sample_period;

	/*
	 * The period we started this sample with.
	 */
	u64				last_period;

	/*
	 * How much of the current period is left; note that this is
	 * a full 64-bit value and allows for generation of periods longer
	 * than hardware might allow.
	 */
	local64_t			period_left;

	/*
	 * State for throttling the event, see __perf_event_overflow() and
	 * perf_adjust_freq_unthr_context().
	 */
	u64                             interrupts_seq;
	u64				interrupts;

	/*
	 * State for freq target events, see __perf_event_overflow() and
	 * perf_adjust_freq_unthr_context().
	 */
	u64				freq_time_stamp;
	u64				freq_count_stamp;
#endif
};

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_PMU_TXN_ADD  0x1		/* txn to add/schedule event on PMU */
#define PERF_PMU_TXN_READ 0x2		/* txn to read event group from PMU */

/**
 * pmu::capabilities flags
 */
#define PERF_PMU_CAP_NO_INTERRUPT		0x01
#define PERF_PMU_CAP_NO_NMI			0x02
#define PERF_PMU_CAP_AUX_NO_SG			0x04
#define PERF_PMU_CAP_AUX_SW_DOUBLEBUF		0x08
#define PERF_PMU_CAP_EXCLUSIVE			0x10
#define PERF_PMU_CAP_ITRACE			0x20
#define PERF_PMU_CAP_HETEROGENEOUS_CPUS		0x40

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	struct list_head		entry;

	struct module			*module;
	struct device			*dev;
	const struct attribute_group	**attr_groups;
	const char			*name;
	int				type;

	/*
	 * various common per-pmu feature flags
	 */
	int				capabilities;

	int * __percpu			pmu_disable_count;
	struct perf_cpu_context * __percpu pmu_cpu_context;
	atomic_t			exclusive_cnt; /* < 0: cpu; > 0: tsk */
	int				task_ctx_nr;
	int				hrtimer_interval_ms;

	/* number of address filters this PMU can do */
	unsigned int			nr_addr_filters;

	/*
	 * Fully disable/enable this PMU; can be used to protect from the PMI
	 * as well as for lazy/batch writing of the MSRs.
	 */
	void (*pmu_enable)		(struct pmu *pmu); /* optional */
	void (*pmu_disable)		(struct pmu *pmu); /* optional */

	/*
	 * Try and initialize the event for this PMU.
	 *
	 * Returns:
	 *  -ENOENT	-- @event is not for this PMU
	 *
	 *  -ENODEV	-- @event is for this PMU but PMU not present
	 *  -EBUSY	-- @event is for this PMU but PMU temporarily unavailable
	 *  -EINVAL	-- @event is for this PMU but @event is not valid
	 *  -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
	 *  -EACCES	-- @event is for this PMU, @event is valid, but no privileges
	 *
	 *  0		-- @event is for this PMU and valid
	 *
	 * Other error return values are allowed. A sketch of these callbacks
	 * follows this structure definition.
	 */
	int (*event_init)		(struct perf_event *event);

	/*
	 * Notification that the event was mapped or unmapped.  Called
	 * in the context of the mapping task.
	 */
	void (*event_mapped)		(struct perf_event *event); /* optional */
	void (*event_unmapped)		(struct perf_event *event); /* optional */

	/*
	 * Flags for ->add()/->del()/->start()/->stop(). There are
	 * matching hw_perf_event::state flags.
	 */
#define PERF_EF_START	0x01		/* start the counter when adding    */
#define PERF_EF_RELOAD	0x02		/* reload the counter when starting */
#define PERF_EF_UPDATE	0x04		/* update the counter when stopping */

	/*
	 * Adds/Removes a counter to/from the PMU; can be done inside a
	 * transaction, see the ->*_txn() methods.
	 *
	 * The add/del callbacks will reserve all hardware resources required
	 * to service the event; this includes any counter constraint
	 * scheduling etc.
	 *
	 * Called with IRQs disabled and the PMU disabled on the CPU the event
	 * is on.
	 *
	 * ->add() called without PERF_EF_START should result in the same state
	 *  as ->add() followed by ->stop().
	 *
	 * ->del() must always stop the event as if PERF_EF_UPDATE were set.
	 *  If it calls ->stop(), that must deal with already being stopped
	 *  without PERF_EF_UPDATE.
	 */
	int  (*add)			(struct perf_event *event, int flags);
	void (*del)			(struct perf_event *event, int flags);

	/*
	 * Starts/Stops a counter present on the PMU.
	 *
	 * The PMI handler should stop the counter when perf_event_overflow()
	 * returns !0. ->start() will be used to continue.
	 *
	 * Also used to change the sample period.
	 *
	 * Called with IRQs disabled and the PMU disabled on the CPU the event
	 * is on -- will be called from NMI context when the PMU generates
	 * NMIs.
	 *
	 * ->stop() with PERF_EF_UPDATE will read the counter and update
	 *  period/count values like ->read() would.
	 *
	 * ->start() with PERF_EF_RELOAD will reprogram the counter
	 *  value, must be preceded by a ->stop() with PERF_EF_UPDATE.
	 */
	void (*start)			(struct perf_event *event, int flags);
	void (*stop)			(struct perf_event *event, int flags);

	/*
	 * Updates the counter value of the event.
	 *
	 * For sampling capable PMUs this will also update the software period
	 * hw_perf_event::period_left field.
	 */
	void (*read)			(struct perf_event *event);

	/*
	 * Group events scheduling is treated as a transaction: add
	 * group events as a whole and perform one schedulability test.
	 * If the test fails, roll back the whole group.
	 *
	 * Start the transaction; after this, ->add() doesn't need to
	 * do schedulability tests.
	 *
	 * Optional.
	 */
	void (*start_txn)		(struct pmu *pmu, unsigned int txn_flags);
	/*
	 * If ->start_txn() disabled the ->add() schedulability test
	 * then ->commit_txn() is required to perform one. On success
	 * the transaction is closed. On error the transaction is kept
	 * open until ->cancel_txn() is called.
	 *
	 * Optional.
	 */
	int  (*commit_txn)		(struct pmu *pmu);
	/*
	 * Will cancel the transaction; assumes ->del() is called
	 * for each successful ->add() during the transaction.
	 *
	 * Optional.
	 */
	void (*cancel_txn)		(struct pmu *pmu);

	/*
	 * Will return the value for perf_event_mmap_page::index for this
	 * event; if no implementation is provided it will default to:
	 * event->hw.idx + 1.
	 */
	int (*event_idx)		(struct perf_event *event); /* optional */

	/*
	 * context-switches callback
	 */
	void (*sched_task)		(struct perf_event_context *ctx,
					bool sched_in);
	/*
	 * PMU specific data size
	 */
	size_t				task_ctx_size;

	/*
	 * Return the count value for a counter.
	 */
	u64 (*count)			(struct perf_event *event); /* optional */

	/*
	 * Set up pmu-private data structures for an AUX area
	 */
	void *(*setup_aux)		(int cpu, void **pages,
					 int nr_pages, bool overwrite);
					/* optional */

	/*
	 * Free pmu-private AUX data structures
	 */
	void (*free_aux)		(void *aux); /* optional */

	/*
	 * Validate address range filters: make sure the HW supports the
	 * requested configuration and number of filters; return 0 if the
	 * supplied filters are valid, -errno otherwise.
	 *
	 * Runs in the context of the ioctl()ing process and is not serialized
	 * with the rest of the PMU callbacks.
	 */
	int (*addr_filters_validate)	(struct list_head *filters);
					/* optional */

	/*
	 * Synchronize address range filter configuration:
	 * translate hw-agnostic filters into hardware configuration in
	 * event::hw::addr_filters.
	 *
	 * Runs as a part of filter sync sequence that is done in ->start()
	 * callback by calling perf_event_addr_filters_sync().
	 *
	 * May (and should) traverse event::addr_filters::list, for which its
	 * caller provides necessary serialization.
	 */
	void (*addr_filters_sync)	(struct perf_event *event);
					/* optional */

	/*
	 * Filter events for PMU-specific reasons.
	 */
	int (*filter_match)		(struct perf_event *event); /* optional */
};
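
/*
 * Illustrative sketch (not part of this header): the minimal callback set a
 * simple software PMU might provide.  Everything prefixed my_ is
 * hypothetical and real counter programming is elided; see the
 * ->event_init(), ->add()/->del() and ->start()/->stop() contracts above.
 *
 *	static int my_pmu_event_init(struct perf_event *event)
 *	{
 *		if (event->attr.type != event->pmu->type)
 *			return -ENOENT;
 *		return 0;
 *	}
 *
 *	static void my_pmu_start(struct perf_event *event, int flags)
 *	{
 *		if (flags & PERF_EF_RELOAD)
 *			my_pmu_write_counter(event);
 *		event->hw.state = 0;
 *	}
 *
 *	static void my_pmu_stop(struct perf_event *event, int flags)
 *	{
 *		event->hw.state |= PERF_HES_STOPPED;
 *		if ((flags & PERF_EF_UPDATE) &&
 *		    !(event->hw.state & PERF_HES_UPTODATE)) {
 *			my_pmu_update_count(event);
 *			event->hw.state |= PERF_HES_UPTODATE;
 *		}
 *	}
 *
 *	static int my_pmu_add(struct perf_event *event, int flags)
 *	{
 *		event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
 *		if (flags & PERF_EF_START)
 *			my_pmu_start(event, PERF_EF_RELOAD);
 *		return 0;
 *	}
 *
 *	static void my_pmu_del(struct perf_event *event, int flags)
 *	{
 *		my_pmu_stop(event, PERF_EF_UPDATE);
 *	}
 *
 *	static void my_pmu_read(struct perf_event *event)
 *	{
 *		my_pmu_update_count(event);
 *	}
 *
 *	static struct pmu my_pmu = {
 *		.event_init	= my_pmu_event_init,
 *		.add		= my_pmu_add,
 *		.del		= my_pmu_del,
 *		.start		= my_pmu_start,
 *		.stop		= my_pmu_stop,
 *		.read		= my_pmu_read,
 *	};
 *
 *	perf_pmu_register(&my_pmu, "my_pmu", -1);
 */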

/**
 * struct perf_addr_filter - address range filter definition
 * @entry:	event's filter list linkage
 * @inode:	object file's inode for file-based filters
 * @offset:	filter range offset
 * @size:	filter range size
 * @range:	1: range, 0: address
 * @filter:	1: filter/start, 0: stop
 *
 * This is a hardware-agnostic filter configuration as specified by the user.
 */
struct perf_addr_filter {
	struct list_head	entry;
	struct inode		*inode;
	unsigned long		offset;
	unsigned long		size;
	unsigned int		range	: 1,
				filter	: 1;
};

/**
 * struct perf_addr_filters_head - container for address range filters
 * @list:	list of filters for this event
 * @lock:	spinlock that serializes accesses to the @list and event's
 *		(and its children's) filter generations.
 * @nr_file_filters:	number of file-based filters
 *
 * A child event will use parent's @list (and therefore @lock), so they are
 * bundled together; see perf_event_addr_filters().
 */
struct perf_addr_filters_head {
	struct list_head	list;
	raw_spinlock_t		lock;
	unsigned int		nr_file_filters;
};

/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
	PERF_EVENT_STATE_DEAD		= -4,
	PERF_EVENT_STATE_EXIT		= -3,
	PERF_EVENT_STATE_ERROR		= -2,
	PERF_EVENT_STATE_OFF		= -1,
	PERF_EVENT_STATE_INACTIVE	=  0,
	PERF_EVENT_STATE_ACTIVE		=  1,
};

struct file;
struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *,
					struct perf_sample_data *,
					struct pt_regs *regs);

/*
 * Event capabilities. For event_caps and group_caps.
 *
 * PERF_EV_CAP_SOFTWARE: Is a software event.
 * PERF_EV_CAP_READ_ACTIVE_PKG: A CPU event (or cgroup event) that can be read
 * from any CPU in the package where it is active.
 */
#define PERF_EV_CAP_SOFTWARE		BIT(0)
#define PERF_EV_CAP_READ_ACTIVE_PKG	BIT(1)

#define SWEVENT_HLIST_BITS		8
#define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
	struct hlist_head		heads[SWEVENT_HLIST_SIZE];
	struct rcu_head			rcu_head;
};

#define PERF_ATTACH_CONTEXT	0x01
#define PERF_ATTACH_GROUP	0x02
#define PERF_ATTACH_TASK	0x04
#define PERF_ATTACH_TASK_DATA	0x08

struct perf_cgroup;
struct ring_buffer;

struct pmu_event_list {
	raw_spinlock_t		lock;
	struct list_head	list;
};

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
	/*
	 * entry onto perf_event_context::event_list;
	 *   modifications require ctx->lock
	 *   RCU safe iterations.
	 */
	struct list_head		event_entry;

	/*
	 * XXX: group_entry and sibling_list should be mutually exclusive;
	 * either you're a sibling in a group, or you're the group leader.
	 * Rework the code to always use the same list element.
	 *
	 * Locked for modification by both ctx->mutex and ctx->lock; holding
	 * either suffices for read.
	 */
	struct list_head		group_entry;
	struct list_head		sibling_list;

	/*
	 * We need storage to track the entries in perf_pmu_migrate_context; we
	 * cannot use the event_entry because of RCU and we want to keep the
	 * group intact, which avoids us using the other two entries.
	 */
	struct list_head		migrate_entry;

	struct hlist_node		hlist_entry;
	struct list_head		active_entry;
	int				nr_siblings;

	/* Not serialized. Only written during event initialization. */
	int				event_caps;
	/* The cumulative AND of all event_caps for events in this group. */
	int				group_caps;

	struct perf_event		*group_leader;
	struct pmu			*pmu;
	void				*pmu_private;

	enum perf_event_active_state	state;
	unsigned int			attach_state;
	local64_t			count;
	atomic64_t			child_count;

	/*
	 * These are the total time in nanoseconds that the event
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task event)
	 * and running (scheduled onto the CPU), respectively.
	 *
	 * They are computed from tstamp_enabled, tstamp_running and
	 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
	 */
	u64				total_time_enabled;
	u64				total_time_running;

	/*
	 * These are timestamps used for computing total_time_enabled
	 * and total_time_running when the event is in INACTIVE or
	 * ACTIVE state, measured in nanoseconds from an arbitrary point
	 * in time.
	 * tstamp_enabled: the notional time when the event was enabled
	 * tstamp_running: the notional time when the event was scheduled on
	 * tstamp_stopped: in INACTIVE state, the notional time when the
	 *	event was scheduled off.
	 */
	u64				tstamp_enabled;
	u64				tstamp_running;
	u64				tstamp_stopped;

	/*
	 * timestamp shadows the actual context timing but it can
	 * be safely used in NMI interrupt context. It reflects the
	 * context time as it was when the event was last scheduled in.
	 *
	 * ctx_time already accounts for ctx->timestamp. Therefore to
	 * compute ctx_time for a sample, simply add perf_clock().
	 */
	u64				shadow_ctx_time;

	struct perf_event_attr		attr;
	u16				header_size;
	u16				id_header_size;
	u16				read_size;
	struct hw_perf_event		hw;

	struct perf_event_context	*ctx;
	atomic_long_t			refcount;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * events have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			child_mutex;
	struct list_head		child_list;
	struct perf_event		*parent;

	int				oncpu;
	int				cpu;

	struct list_head		owner_entry;
	struct task_struct		*owner;

	/* mmap bits */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;

	struct ring_buffer		*rb;
	struct list_head		rb_entry;
	unsigned long			rcu_batches;
	int				rcu_pending;

	/* poll related */
	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	int				pending_wakeup;
	int				pending_kill;
	int				pending_disable;
	struct irq_work			pending;

	atomic_t			event_limit;

	/* address range filters */
	struct perf_addr_filters_head	addr_filters;
	/* vma address array for file-based filters */
	unsigned long			*addr_filters_offs;
	unsigned long			addr_filters_gen;

	void (*destroy)(struct perf_event *);
	struct rcu_head			rcu_head;

	struct pid_namespace		*ns;
	u64				id;

	u64				(*clock)(void);
	perf_overflow_handler_t		overflow_handler;
	void				*overflow_handler_context;
#ifdef CONFIG_BPF_SYSCALL
	perf_overflow_handler_t		orig_overflow_handler;
	struct bpf_prog			*prog;
#endif

#ifdef CONFIG_EVENT_TRACING
	struct trace_event_call		*tp_event;
	struct event_filter		*filter;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops               ftrace_ops;
#endif
#endif

#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup		*cgrp; /* cgroup event is attached to */
	int				cgrp_defer_enabled;
#endif

	struct list_head		sb_list;
#endif /* CONFIG_PERF_EVENTS */
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
	struct pmu			*pmu;
	/*
	 * Protect the states of the events in the list,
	 * nr_active, and the list:
	 */
	raw_spinlock_t			lock;
	/*
	 * Protect the list of events.  Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex			mutex;

	struct list_head		active_ctx_list;
	struct list_head		pinned_groups;
	struct list_head		flexible_groups;
	struct list_head		event_list;
	int				nr_events;
	int				nr_active;
	int				is_active;
	int				nr_stat;
	int				nr_freq;
	int				rotate_disable;
	atomic_t			refcount;
	struct task_struct		*task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64				time;
	u64				timestamp;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_event_context	*parent_ctx;
	u64				parent_gen;
	u64				generation;
	int				pin_count;
#ifdef CONFIG_CGROUP_PERF
	int				nr_cgroups;	 /* cgroup evts */
#endif
	void				*task_ctx_data; /* pmu specific data */
	struct rcu_head			rcu_head;
};

/*
 * Number of contexts where an event can trigger:
 *	task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS	4

/**
 * struct perf_cpu_context - per-CPU event context structure
 */
struct perf_cpu_context {
	struct perf_event_context	ctx;
	struct perf_event_context	*task_ctx;
	int				active_oncpu;
	int				exclusive;

	raw_spinlock_t			hrtimer_lock;
	struct hrtimer			hrtimer;
	ktime_t				hrtimer_interval;
	unsigned int			hrtimer_active;

#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup		*cgrp;
	struct list_head		cgrp_cpuctx_entry;
#endif

	struct list_head		sched_cb_entry;
	int				sched_cb_usage;
};

struct perf_output_handle {
	struct perf_event		*event;
	struct ring_buffer		*rb;
	unsigned long			wakeup;
	unsigned long			size;
	union {
		void			*addr;
		unsigned long		head;
	};
	int				page;
};

struct bpf_perf_event_data_kern {
	struct pt_regs *regs;
	struct perf_sample_data *data;
};

#ifdef CONFIG_CGROUP_PERF

/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */
struct perf_cgroup_info {
	u64				time;
	u64				timestamp;
};

struct perf_cgroup {
	struct cgroup_subsys_state	css;
	struct perf_cgroup_info	__percpu *info;
};

/*
 * Must ensure cgroup is pinned (css_get) before calling
 * this function. In other words, we cannot call this function
 * if there is no cgroup event for the current CPU context.
 */
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
{
	return container_of(task_css_check(task, perf_event_cgrp_id,
					   ctx ? lockdep_is_held(&ctx->lock)
					       : true),
			    struct perf_cgroup, css);
}
#endif /* CONFIG_CGROUP_PERF */

#ifdef CONFIG_PERF_EVENTS

extern void *perf_aux_output_begin(struct perf_output_handle *handle,
				   struct perf_event *event);
extern void perf_aux_output_end(struct perf_output_handle *handle,
				unsigned long size, bool truncated);
extern int perf_aux_output_skip(struct perf_output_handle *handle,
				unsigned long size);
extern void *perf_get_aux(struct perf_output_handle *handle);
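
/*
 * Illustrative sketch (not part of this header): the common AUX flow in a
 * PMU driver that implements ->setup_aux().  The driver grabs the AUX
 * buffer, copies trace data into it, and reports how much was written;
 * my_hw_copy_trace() and bytes are hypothetical.
 *
 *	struct perf_output_handle handle;
 *	void *buf;
 *
 *	buf = perf_aux_output_begin(&handle, event);
 *	if (!buf)
 *		return;
 *	my_hw_copy_trace(buf, bytes);
 *	perf_aux_output_end(&handle, bytes, false);
 */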

extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *prev,
				       struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
					struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern struct file *perf_event_get(unsigned int fd);
extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern void perf_sched_cb_dec(struct pmu *pmu);
extern void perf_sched_cb_inc(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
				int cpu,
				struct task_struct *task,
				perf_overflow_handler_t callback,
				void *context);
extern void perf_pmu_migrate_context(struct pmu *pmu,
				int src_cpu, int dst_cpu);
extern u64 perf_event_read_local(struct perf_event *event);
extern u64 perf_event_read_value(struct perf_event *event,
				 u64 *enabled, u64 *running);
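
/*
 * Illustrative sketch (not part of this header): in-kernel users (the
 * hard-lockup watchdog, for instance) typically drive the API above like
 * this.  The attribute values are examples only.
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_CPU_CYCLES,
 *		.size		= sizeof(attr),
 *		.pinned		= 1,
 *	};
 *	struct perf_event *event;
 *	u64 count, enabled, running;
 *
 *	event = perf_event_create_kernel_counter(&attr, smp_processor_id(),
 *						 NULL, NULL, NULL);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 *	...
 *	count = perf_event_read_value(event, &enabled, &running);
 *	perf_event_release_kernel(event);
 */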

struct perf_sample_data {
	/*
	 * Fields set by perf_sample_data_init(), group so as to
	 * minimize the cachelines touched.
	 */
	u64				addr;
	struct perf_raw_record		*raw;
	struct perf_branch_stack	*br_stack;
	u64				period;
	u64				weight;
	u64				txn;
	union  perf_mem_data_src	data_src;

	/*
	 * The other fields, optionally {set,used} by
	 * perf_{prepare,output}_sample().
	 */
	u64				type;
	u64				ip;
	struct {
		u32	pid;
		u32	tid;
	}				tid_entry;
	u64				time;
	u64				id;
	u64				stream_id;
	struct {
		u32	cpu;
		u32	reserved;
	}				cpu_entry;
	struct perf_callchain_entry	*callchain;

	/*
	 * regs_user may point to task_pt_regs or to regs_user_copy, depending
	 * on arch details.
	 */
	struct perf_regs		regs_user;
	struct pt_regs			regs_user_copy;

	struct perf_regs		regs_intr;
	u64				stack_user_size;
} ____cacheline_aligned;

/* default value for data source */
#define PERF_MEM_NA (PERF_MEM_S(OP, NA)   |\
		    PERF_MEM_S(LVL, NA)   |\
		    PERF_MEM_S(SNOOP, NA) |\
		    PERF_MEM_S(LOCK, NA)  |\
		    PERF_MEM_S(TLB, NA))

static inline void perf_sample_data_init(struct perf_sample_data *data,
					 u64 addr, u64 period)
{
	/* remaining struct members initialized in perf_prepare_sample() */
	data->addr = addr;
	data->raw  = NULL;
	data->br_stack = NULL;
	data->period = period;
	data->weight = 0;
	data->data_src.val = PERF_MEM_NA;
	data->txn = 0;
}
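
/*
 * Illustrative sketch (not part of this header): a PMU interrupt handler
 * typically initializes a sample and hands it to the generic overflow path
 * (perf_event_overflow(), declared just below); a nonzero return means the
 * event is throttled and the counter should be stopped.  my_pmu_stop() is
 * hypothetical.
 *
 *	struct perf_sample_data data;
 *
 *	perf_sample_data_init(&data, 0, event->hw.last_period);
 *	if (perf_event_overflow(event, &data, regs))
 *		my_pmu_stop(event, 0);
 */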

extern void perf_output_sample(struct perf_output_handle *handle,
			       struct perf_event_header *header,
			       struct perf_sample_data *data,
			       struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event,
				struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event,
				 struct perf_sample_data *data,
				 struct pt_regs *regs);

extern void perf_event_output_forward(struct perf_event *event,
				     struct perf_sample_data *data,
				     struct pt_regs *regs);
extern void perf_event_output_backward(struct perf_event *event,
				       struct perf_sample_data *data,
				       struct pt_regs *regs);
extern void perf_event_output(struct perf_event *event,
			      struct perf_sample_data *data,
			      struct pt_regs *regs);

static inline bool
is_default_overflow_handler(struct perf_event *event)
{
	if (likely(event->overflow_handler == perf_event_output_forward))
		return true;
	if (unlikely(event->overflow_handler == perf_event_output_backward))
		return true;
	return false;
}

extern void
perf_event_header__init_id(struct perf_event_header *header,
			   struct perf_sample_data *data,
			   struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
			     struct perf_output_handle *handle,
			     struct perf_sample_data *sample);

extern void
perf_log_lost_samples(struct perf_event *event, u64 lost);

static inline bool is_sampling_event(struct perf_event *event)
{
	return event->attr.sample_period != 0;
}

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
	return event->event_caps & PERF_EV_CAP_SOFTWARE;
}

extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * Take a snapshot of the regs. Skip ip and frame pointer to
 * the nth caller. We only need a few of the regs:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - bp for callchains
 * - eflags, for future purposes, just in case
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
	if (static_key_false(&perf_swevent_enabled[event_id]))
		__perf_sw_event(event_id, nr, regs, addr);
}

DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);

/*
 * 'Special' version for the scheduler; it hard assumes no recursion,
 * which is guaranteed by us not actually scheduling inside other swevents
 * because those disable preemption.
 */
static __always_inline void
perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
{
	if (static_key_false(&perf_swevent_enabled[event_id])) {
		struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);

		perf_fetch_caller_regs(regs);
		___perf_sw_event(event_id, nr, regs, addr);
	}
}

extern struct static_key_false perf_sched_events;

static __always_inline bool
perf_sw_migrate_enabled(void)
{
	if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS]))
		return true;
	return false;
}

static inline void perf_event_task_migrate(struct task_struct *task)
{
	if (perf_sw_migrate_enabled())
		task->sched_migrated = 1;
}
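
/*
 * Illustrative sketch (not part of this header): generic kernel code counts
 * software events with a single call; the static key keeps the disabled
 * case down to a patched-out branch.  This mirrors how the fault path
 * counts page faults:
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 */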

static inline void perf_event_task_sched_in(struct task_struct *prev,
					    struct task_struct *task)
{
	if (static_branch_unlikely(&perf_sched_events))
		__perf_event_task_sched_in(prev, task);

	if (perf_sw_migrate_enabled() && task->sched_migrated) {
		struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);

		perf_fetch_caller_regs(regs);
		___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0);
		task->sched_migrated = 0;
	}
}

static inline void perf_event_task_sched_out(struct task_struct *prev,
					     struct task_struct *next)
{
	perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);

	if (static_branch_unlikely(&perf_sched_events))
		__perf_event_task_sched_out(prev, next);
}

static inline u64 __perf_event_count(struct perf_event *event)
{
	return local64_read(&event->count) + atomic64_read(&event->child_count);
}

extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_exec(void);
extern void perf_event_comm(struct task_struct *tsk, bool exec);
extern void perf_event_fork(struct task_struct *tsk);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
		   u32 max_stack, bool crosstask, bool add_mark);
extern int get_callchain_buffers(int max_stack);
extern void put_callchain_buffers(void);

extern int sysctl_perf_event_max_stack;
extern int sysctl_perf_event_max_contexts_per_stack;

static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip)
{
	if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) {
		struct perf_callchain_entry *entry = ctx->entry;
		entry->ip[entry->nr++] = ip;
		++ctx->contexts;
		return 0;
	} else {
		ctx->contexts_maxed = true;
		return -1; /* no more room, stop walking the stack */
	}
}

static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip)
{
	if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) {
		struct perf_callchain_entry *entry = ctx->entry;

		entry->ip[entry->nr++] = ip;
		++ctx->nr;
		return 0;
	} else {
		return -1; /* no more room, stop walking the stack */
	}
}
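
/*
 * Illustrative sketch (not part of this header): an architecture's
 * perf_callchain_kernel() feeds each frame it unwinds into
 * perf_callchain_store(), stopping once the entry is full.
 * my_arch_unwind() is hypothetical pseudocode for the arch unwinder:
 *
 *	void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 *				   struct pt_regs *regs)
 *	{
 *		unsigned long addr;
 *
 *		for each frame addr in my_arch_unwind(regs)
 *			if (perf_callchain_store(entry, addr))
 *				break;
 *	}
 */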

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;
extern int sysctl_perf_cpu_time_max_percent;

extern void perf_sample_event_took(u64 sample_len_ns);

extern int perf_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);
extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);

int perf_event_max_stack_handler(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp, loff_t *ppos);

static inline bool perf_paranoid_tracepoint_raw(void)
{
	return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
	return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
	return sysctl_perf_event_paranoid > 1;
}

extern void perf_event_init(void);
extern void perf_tp_event(u16 event_type, u64 count, void *record,
			  int entry_size, struct pt_regs *regs,
			  struct hlist_head *head, int rctx,
			  struct task_struct *task);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
# define perf_misc_flags(regs) \
		(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs)	instruction_pointer(regs)
#endif

static inline bool has_branch_stack(struct perf_event *event)
{
	return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}

static inline bool needs_branch_stack(struct perf_event *event)
{
	return event->attr.branch_sample_type != 0;
}

static inline bool has_aux(struct perf_event *event)
{
	return event->pmu->setup_aux;
}

static inline bool is_write_backward(struct perf_event *event)
{
	return !!event->attr.write_backward;
}

static inline bool has_addr_filter(struct perf_event *event)
{
	return event->pmu->nr_addr_filters;
}

/*
 * An inherited event uses parent's filters
 */
static inline struct perf_addr_filters_head *
perf_event_addr_filters(struct perf_event *event)
{
	struct perf_addr_filters_head *ifh = &event->addr_filters;

	if (event->parent)
		ifh = &event->parent->addr_filters;

	return ifh;
}

extern void perf_event_addr_filters_sync(struct perf_event *event);

extern int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_event *event, unsigned int size);
extern int perf_output_begin_forward(struct perf_output_handle *handle,
				    struct perf_event *event,
				    unsigned int size);
extern int perf_output_begin_backward(struct perf_output_handle *handle,
				      struct perf_event *event,
				      unsigned int size);

extern void perf_output_end(struct perf_output_handle *handle);
extern unsigned int perf_output_copy(struct perf_output_handle *handle,
			     const void *buf, unsigned int len);
extern unsigned int perf_output_skip(struct perf_output_handle *handle,
				     unsigned int len);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern u64 perf_swevent_set_period(struct perf_event *event);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern void perf_event_disable_local(struct perf_event *event);
extern void perf_event_disable_inatomic(struct perf_event *event);
extern void perf_event_task_tick(void);
extern int perf_event_account_interrupt(struct perf_event *event);
#else /* !CONFIG_PERF_EVENTS: */
static inline void *
perf_aux_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event)				{ return NULL; }
static inline void
perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
		    bool truncated)					{ }
static inline int
perf_aux_output_skip(struct perf_output_handle *handle,
		     unsigned long size)				{ return -EINVAL; }
static inline void *
perf_get_aux(struct perf_output_handle *handle)				{ return NULL; }

static inline void
perf_event_task_migrate(struct task_struct *task)			{ }
static inline void
perf_event_task_sched_in(struct task_struct *prev,
			 struct task_struct *task)			{ }
static inline void
perf_event_task_sched_out(struct task_struct *prev,
			  struct task_struct *next)			{ }
static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
static inline void perf_event_exit_task(struct task_struct *child)	{ }
static inline void perf_event_free_task(struct task_struct *task)	{ }
static inline void perf_event_delayed_put(struct task_struct *task)	{ }
static inline struct file *perf_event_get(unsigned int fd)	{ return ERR_PTR(-EINVAL); }
static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
{
	return ERR_PTR(-EINVAL);
}
static inline u64 perf_event_read_local(struct perf_event *event)	{ return -EINVAL; }
static inline void perf_event_print_debug(void)				{ }
static inline int perf_event_task_disable(void)				{ return -EINVAL; }
static inline int perf_event_task_enable(void)				{ return -EINVAL; }
static inline int perf_event_refresh(struct perf_event *event, int refresh)
{
	return -EINVAL;
}

static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)	{ }
static inline void
perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)			{ }
static inline void
perf_bp_event(struct perf_event *event, void *data)			{ }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
static inline void perf_event_exec(void)				{ }
static inline void perf_event_comm(struct task_struct *tsk, bool exec)	{ }
static inline void perf_event_fork(struct task_struct *tsk)		{ }
static inline void perf_event_init(void)				{ }
static inline int  perf_swevent_get_recursion_context(void)		{ return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)		{ }
static inline u64 perf_swevent_set_period(struct perf_event *event)	{ return 0; }
static inline void perf_event_enable(struct perf_event *event)		{ }
static inline void perf_event_disable(struct perf_event *event)		{ }
static inline int __perf_event_disable(void *info)			{ return -1; }
static inline void perf_event_task_tick(void)				{ }
static inline int perf_event_release_kernel(struct perf_event *event)	{ return 0; }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern void perf_restore_debug_store(void);
#else
static inline void perf_restore_debug_store(void)			{ }
#endif

static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag)
{
	return frag->pad < sizeof(u64);
}

#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
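
/*
 * Illustrative sketch (not part of this header): emitting a custom record
 * into the ring buffer with the perf_output_*() helpers declared above.
 * PERF_RECORD_MY_EVENT and the previously filled my_record variable are
 * hypothetical.
 *
 *	struct perf_output_handle handle;
 *	struct perf_event_header header = {
 *		.type = PERF_RECORD_MY_EVENT,
 *		.size = sizeof(header) + sizeof(my_record),
 *	};
 *
 *	if (perf_output_begin(&handle, event, header.size))
 *		return;
 *	perf_output_put(&handle, header);
 *	perf_output_put(&handle, my_record);
 *	perf_output_end(&handle);
 */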

struct perf_pmu_events_attr {
	struct device_attribute attr;
	u64 id;
	const char *event_str;
};

struct perf_pmu_events_ht_attr {
	struct device_attribute			attr;
	u64					id;
	const char				*event_str_ht;
	const char				*event_str_noht;
};

ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
			      char *page);

#define PMU_EVENT_ATTR(_name, _var, _id, _show)				\
static struct perf_pmu_events_attr _var = {				\
	.attr = __ATTR(_name, 0444, _show, NULL),			\
	.id   =  _id,							\
};

#define PMU_EVENT_ATTR_STRING(_name, _var, _str)			    \
static struct perf_pmu_events_attr _var = {				    \
	.attr		= __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
	.id		= 0,						    \
	.event_str	= _str,						    \
};

#define PMU_FORMAT_ATTR(_name, _format)					\
static ssize_t								\
_name##_show(struct device *dev,					\
			       struct device_attribute *attr,		\
			       char *page)				\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
									\
static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
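
/*
 * Illustrative sketch (not part of this header): how PMU drivers commonly
 * use these macros to expose sysfs event and format attributes.  The names
 * and strings shown are examples only.
 *
 *	PMU_EVENT_ATTR_STRING(cpu-cycles, evattr_cpu_cycles, "event=0x11");
 *	PMU_FORMAT_ATTR(event, "config:0-7");
 *
 *	static struct attribute *my_pmu_format_attrs[] = {
 *		&format_attr_event.attr,
 *		NULL,
 *	};
 */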

/* Performance counter hotplug functions */
#ifdef CONFIG_PERF_EVENTS
int perf_event_init_cpu(unsigned int cpu);
int perf_event_exit_cpu(unsigned int cpu);
#else
#define perf_event_init_cpu	NULL
#define perf_event_exit_cpu	NULL
#endif

#endif /* _LINUX_PERF_EVENT_H */