/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <uapi/linux/perf_event.h>

/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

struct perf_guest_info_callbacks {
	int				(*is_in_guest)(void);
	int				(*is_user_mode)(void);
	unsigned long			(*get_guest_ip)(void);
};
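
/*
 * Illustrative sketch: a hypervisor (e.g. KVM) fills this structure in and
 * registers it with perf_register_guest_info_callbacks(), declared further
 * down (the my_* implementations are hypothetical):
 *
 *	static struct perf_guest_info_callbacks my_guest_cbs = {
 *		.is_in_guest	= my_is_in_guest,
 *		.is_user_mode	= my_is_guest_user_mode,
 *		.get_guest_ip	= my_get_guest_ip,
 *	};
 *
 *	perf_register_guest_info_callbacks(&my_guest_cbs);
 */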

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
#include <linux/cgroup.h>
#include <asm/local.h>

struct perf_callchain_entry {
	__u64				nr;
	__u64				ip[PERF_MAX_STACK_DEPTH];
};

struct perf_raw_record {
	u32				size;
	void				*data;
};

/*
 * branch stack layout:
 *  nr: number of taken branches stored in entries[]
 *
 * Note that nr can vary from sample to sample.
 * Branches (to, from) are stored from most recent
 * to least recent, i.e., entries[0] contains the most
 * recent branch.
 */
struct perf_branch_stack {
	__u64				nr;
	struct perf_branch_entry	entries[0];
};
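
/*
 * Illustrative sketch: consumers walk the stack most-recent-first, per the
 * layout comment above (my_print is hypothetical):
 *
 *	for (i = 0; i < br_stack->nr; i++)
 *		my_print(br_stack->entries[i].from, br_stack->entries[i].to);
 */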

struct task_struct;

/*
 * extra PMU register associated with an event
 */
struct hw_perf_event_extra {
	u64		config;	/* register value */
	unsigned int	reg;	/* register address or index */
	int		alloc;	/* extra register already allocated */
	int		idx;	/* index in shared_regs->regs[] */
};

struct event_constraint;

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
	union {
		struct { /* hardware */
			u64		config;
			u64		last_tag;
			unsigned long	config_base;
			unsigned long	event_base;
			int		event_base_rdpmc;
			int		idx;
			int		last_cpu;
			int		flags;

			struct hw_perf_event_extra extra_reg;
			struct hw_perf_event_extra branch_reg;

			struct event_constraint *constraint;
		};
		struct { /* software */
			struct hrtimer	hrtimer;
		};
		struct { /* tracepoint */
			/* for tp_event->class */
			struct list_head	tp_list;
		};
		struct { /* intel_cqm */
			int			cqm_state;
			int			cqm_rmid;
			struct list_head	cqm_events_entry;
			struct list_head	cqm_groups_entry;
			struct list_head	cqm_group_entry;
		};
		struct { /* itrace */
			int			itrace_started;
		};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		struct { /* breakpoint */
			/*
			 * Crufty hack to avoid the chicken and egg
			 * problem hw_breakpoint has with context
			 * creation and event initialization.
			 */
			struct arch_hw_breakpoint	info;
			struct list_head		bp_list;
		};
#endif
	};
	struct task_struct		*target;
	int				state;
	local64_t			prev_count;
	u64				sample_period;
	u64				last_period;
	local64_t			period_left;
	u64				interrupts_seq;
	u64				interrupts;

	u64				freq_time_stamp;
	u64				freq_count_stamp;
#endif
};

/*
 * hw_perf_event::state flags
 */
#define PERF_HES_STOPPED	0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
#define PERF_HES_ARCH		0x04

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_EVENT_TXN 0x1

/**
 * pmu::capabilities flags
 */
#define PERF_PMU_CAP_NO_INTERRUPT		0x01
#define PERF_PMU_CAP_NO_NMI			0x02
#define PERF_PMU_CAP_AUX_NO_SG			0x04
#define PERF_PMU_CAP_AUX_SW_DOUBLEBUF		0x08
#define PERF_PMU_CAP_EXCLUSIVE			0x10
#define PERF_PMU_CAP_ITRACE			0x20

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	struct list_head		entry;

	struct module			*module;
	struct device			*dev;
	const struct attribute_group	**attr_groups;
	const char			*name;
	int				type;

	/*
	 * various common per-pmu feature flags
	 */
	int				capabilities;

	int * __percpu			pmu_disable_count;
	struct perf_cpu_context * __percpu pmu_cpu_context;
	atomic_t			exclusive_cnt; /* < 0: cpu; > 0: tsk */
	int				task_ctx_nr;
	int				hrtimer_interval_ms;

	/*
	 * Fully disable/enable this PMU, can be used to protect from the PMI
	 * as well as for lazy/batch writing of the MSRs.
	 */
	void (*pmu_enable)		(struct pmu *pmu); /* optional */
	void (*pmu_disable)		(struct pmu *pmu); /* optional */

	/*
	 * Try and initialize the event for this PMU.
	 * Should return -ENOENT when the @event doesn't match this PMU.
	 */
	int (*event_init)		(struct perf_event *event);

	/*
	 * Notification that the event was mapped or unmapped.  Called
	 * in the context of the mapping task.
	 */
	void (*event_mapped)		(struct perf_event *event); /*optional*/
	void (*event_unmapped)		(struct perf_event *event); /*optional*/

#define PERF_EF_START	0x01		/* start the counter when adding    */
#define PERF_EF_RELOAD	0x02		/* reload the counter when starting */
#define PERF_EF_UPDATE	0x04		/* update the counter when stopping */

	/*
	 * Adds/Removes a counter to/from the PMU, can be done inside
	 * a transaction, see the ->*_txn() methods.
	 */
	int  (*add)			(struct perf_event *event, int flags);
	void (*del)			(struct perf_event *event, int flags);

	/*
	 * Starts/Stops a counter present on the PMU. The PMI handler
	 * should stop the counter when perf_event_overflow() returns
	 * !0. ->start() will be used to continue.
	 */
	void (*start)			(struct perf_event *event, int flags);
	void (*stop)			(struct perf_event *event, int flags);

	/*
	 * Updates the counter value of the event.
	 */
	void (*read)			(struct perf_event *event);

	/*
	 * Group events scheduling is treated as a transaction: add
	 * group events as a whole and perform one schedulability test.
	 * If the test fails, roll back the whole group.
	 *
	 * Start the transaction; after this, ->add() doesn't need to
	 * do schedulability tests.
	 */
	void (*start_txn)		(struct pmu *pmu); /* optional */
	/*
	 * If ->start_txn() disabled the ->add() schedulability test
	 * then ->commit_txn() is required to perform one. On success
	 * the transaction is closed. On error the transaction is kept
	 * open until ->cancel_txn() is called.
	 */
	int  (*commit_txn)		(struct pmu *pmu); /* optional */
	/*
	 * Will cancel the transaction; assumes ->del() is called
	 * for each successful ->add() during the transaction.
	 */
	void (*cancel_txn)		(struct pmu *pmu); /* optional */

	/*
	 * Will return the value for perf_event_mmap_page::index for this event,
	 * if no implementation is provided it will default to: event->hw.idx + 1.
	 */
	int (*event_idx)		(struct perf_event *event); /*optional */

	/*
	 * context-switches callback
	 */
	void (*sched_task)		(struct perf_event_context *ctx,
					bool sched_in);
	/*
	 * PMU specific data size
	 */
	size_t				task_ctx_size;


	/*
	 * Return the count value for a counter.
	 */
	u64 (*count)			(struct perf_event *event); /*optional*/

	/*
	 * Set up pmu-private data structures for an AUX area
	 */
	void *(*setup_aux)		(int cpu, void **pages,
					 int nr_pages, bool overwrite);
					/* optional */

	/*
	 * Free pmu-private AUX data structures
	 */
	void (*free_aux)		(void *aux); /* optional */
};
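
/*
 * Illustrative sketch (not part of this header's API): a minimal PMU
 * implementation wires up event_init plus the add/del/start/stop/read
 * callbacks and registers itself; all my_* names below are hypothetical.
 *
 *	static int my_event_init(struct perf_event *event)
 *	{
 *		if (event->attr.type != event->pmu->type)
 *			return -ENOENT;
 *		return 0;
 *	}
 *
 *	static struct pmu my_pmu = {
 *		.task_ctx_nr	= perf_invalid_context,
 *		.event_init	= my_event_init,
 *		.add		= my_add,
 *		.del		= my_del,
 *		.start		= my_start,
 *		.stop		= my_stop,
 *		.read		= my_read,
 *	};
 *
 *	perf_pmu_register(&my_pmu, "my_pmu", -1);
 */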

/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
	PERF_EVENT_STATE_EXIT		= -3,
	PERF_EVENT_STATE_ERROR		= -2,
	PERF_EVENT_STATE_OFF		= -1,
	PERF_EVENT_STATE_INACTIVE	=  0,
	PERF_EVENT_STATE_ACTIVE		=  1,
};

struct file;
struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *,
					struct perf_sample_data *,
					struct pt_regs *regs);

enum perf_group_flag {
	PERF_GROUP_SOFTWARE		= 0x1,
};

#define SWEVENT_HLIST_BITS		8
#define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
	struct hlist_head		heads[SWEVENT_HLIST_SIZE];
	struct rcu_head			rcu_head;
};

#define PERF_ATTACH_CONTEXT	0x01
#define PERF_ATTACH_GROUP	0x02
#define PERF_ATTACH_TASK	0x04
#define PERF_ATTACH_TASK_DATA	0x08

struct perf_cgroup;
struct ring_buffer;

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
	/*
	 * entry onto perf_event_context::event_list;
	 *   modifications require ctx->lock
	 *   RCU safe iterations.
	 */
	struct list_head		event_entry;

	/*
	 * XXX: group_entry and sibling_list should be mutually exclusive;
	 * either you're a sibling on a group, or you're the group leader.
	 * Rework the code to always use the same list element.
	 *
	 * Locked for modification by both ctx->mutex and ctx->lock; holding
	 * either suffices for read.
	 */
	struct list_head		group_entry;
	struct list_head		sibling_list;

	/*
	 * We need storage to track the entries in perf_pmu_migrate_context; we
	 * cannot use the event_entry because of RCU and we want to keep the
	 * group intact which avoids us using the other two entries.
	 */
	struct list_head		migrate_entry;

	struct hlist_node		hlist_entry;
	struct list_head		active_entry;
	int				nr_siblings;
	int				group_flags;
	struct perf_event		*group_leader;
	struct pmu			*pmu;

	enum perf_event_active_state	state;
	unsigned int			attach_state;
	local64_t			count;
	atomic64_t			child_count;

	/*
	 * These are the total time in nanoseconds that the event
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task event)
	 * and running (scheduled onto the CPU), respectively.
	 *
	 * They are computed from tstamp_enabled, tstamp_running and
	 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
	 */
	u64				total_time_enabled;
	u64				total_time_running;

	/*
	 * These are timestamps used for computing total_time_enabled
	 * and total_time_running when the event is in INACTIVE or
	 * ACTIVE state, measured in nanoseconds from an arbitrary point
	 * in time.
	 * tstamp_enabled: the notional time when the event was enabled
	 * tstamp_running: the notional time when the event was scheduled on
	 * tstamp_stopped: in INACTIVE state, the notional time when the
	 *	event was scheduled off.
	 */
	u64				tstamp_enabled;
	u64				tstamp_running;
	u64				tstamp_stopped;

	/*
	 * timestamp shadows the actual context timing but it can
	 * be safely used in NMI interrupt context. It reflects the
	 * context time as it was when the event was last scheduled in.
	 *
	 * ctx_time already accounts for ctx->timestamp. Therefore to
	 * compute ctx_time for a sample, simply add perf_clock().
	 */
	u64				shadow_ctx_time;

	struct perf_event_attr		attr;
	u16				header_size;
	u16				id_header_size;
	u16				read_size;
	struct hw_perf_event		hw;

	struct perf_event_context	*ctx;
	atomic_long_t			refcount;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * events have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			child_mutex;
	struct list_head		child_list;
	struct perf_event		*parent;

	int				oncpu;
	int				cpu;

	struct list_head		owner_entry;
	struct task_struct		*owner;

	/* mmap bits */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;

	struct ring_buffer		*rb;
	struct list_head		rb_entry;
	unsigned long			rcu_batches;
	int				rcu_pending;

	/* poll related */
	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	int				pending_wakeup;
	int				pending_kill;
	int				pending_disable;
	struct irq_work			pending;

	atomic_t			event_limit;

	void (*destroy)(struct perf_event *);
	struct rcu_head			rcu_head;

	struct pid_namespace		*ns;
	u64				id;

	u64				(*clock)(void);
	perf_overflow_handler_t		overflow_handler;
	void				*overflow_handler_context;

#ifdef CONFIG_EVENT_TRACING
	struct ftrace_event_call	*tp_event;
	struct event_filter		*filter;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops               ftrace_ops;
#endif
#endif

#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup		*cgrp; /* the cgroup this event is attached to */
	int				cgrp_defer_enabled;
#endif

#endif /* CONFIG_PERF_EVENTS */
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
	struct pmu			*pmu;
	/*
	 * Protect the states of the events in the list,
	 * nr_active, and the list:
	 */
	raw_spinlock_t			lock;
	/*
	 * Protect the list of events.  Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex			mutex;

	struct list_head		active_ctx_list;
	struct list_head		pinned_groups;
	struct list_head		flexible_groups;
	struct list_head		event_list;
	int				nr_events;
	int				nr_active;
	int				is_active;
	int				nr_stat;
	int				nr_freq;
	int				rotate_disable;
	atomic_t			refcount;
	struct task_struct		*task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64				time;
	u64				timestamp;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_event_context	*parent_ctx;
	u64				parent_gen;
	u64				generation;
	int				pin_count;
	int				nr_cgroups;	 /* cgroup evts */
	void				*task_ctx_data; /* pmu specific data */
	struct rcu_head			rcu_head;

	struct delayed_work		orphans_remove;
	bool				orphans_remove_sched;
};

/*
 * Number of contexts where an event can trigger:
 *	task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS	4

/**
 * struct perf_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
	struct perf_event_context	ctx;
	struct perf_event_context	*task_ctx;
	int				active_oncpu;
	int				exclusive;
	struct hrtimer			hrtimer;
	ktime_t				hrtimer_interval;
	struct pmu			*unique_pmu;
	struct perf_cgroup		*cgrp;
};

struct perf_output_handle {
	struct perf_event		*event;
	struct ring_buffer		*rb;
	unsigned long			wakeup;
	unsigned long			size;
	union {
		void			*addr;
		unsigned long		head;
	};
	int				page;
};

#ifdef CONFIG_CGROUP_PERF

/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */
struct perf_cgroup_info {
	u64				time;
	u64				timestamp;
};

struct perf_cgroup {
	struct cgroup_subsys_state	css;
	struct perf_cgroup_info	__percpu *info;
};

/*
 * Must ensure cgroup is pinned (css_get) before calling
 * this function. In other words, we cannot call this function
 * if there is no cgroup event for the current CPU context.
 */
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task)
{
	return container_of(task_css(task, perf_event_cgrp_id),
			    struct perf_cgroup, css);
}
#endif /* CONFIG_CGROUP_PERF */

#ifdef CONFIG_PERF_EVENTS

extern void *perf_aux_output_begin(struct perf_output_handle *handle,
				   struct perf_event *event);
extern void perf_aux_output_end(struct perf_output_handle *handle,
				unsigned long size, bool truncated);
extern int perf_aux_output_skip(struct perf_output_handle *handle,
				unsigned long size);
extern void *perf_get_aux(struct perf_output_handle *handle);

extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *prev,
				       struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
					struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern void perf_sched_cb_dec(struct pmu *pmu);
extern void perf_sched_cb_inc(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
				int cpu,
				struct task_struct *task,
				perf_overflow_handler_t callback,
				void *context);
extern void perf_pmu_migrate_context(struct pmu *pmu,
				int src_cpu, int dst_cpu);
extern u64 perf_event_read_value(struct perf_event *event,
				 u64 *enabled, u64 *running);
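
/*
 * Illustrative sketch: counting CPU cycles on CPU 0 from kernel code
 * (error handling elided; pass a perf_overflow_handler_t instead of the
 * NULL callback to be notified on overflow):
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *		.size	= sizeof(attr),
 *	};
 *	struct perf_event *event;
 *	u64 count, enabled, running;
 *
 *	event = perf_event_create_kernel_counter(&attr, 0, NULL, NULL, NULL);
 *	...
 *	count = perf_event_read_value(event, &enabled, &running);
 *	perf_event_release_kernel(event);
 */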
struct perf_sample_data {
	/*
	 * Fields set by perf_sample_data_init(), group so as to
	 * minimize the cachelines touched.
	 */
	u64				addr;
	struct perf_raw_record		*raw;
	struct perf_branch_stack	*br_stack;
	u64				period;
	u64				weight;
	u64				txn;
	union  perf_mem_data_src	data_src;

	/*
	 * The other fields, optionally {set,used} by
	 * perf_{prepare,output}_sample().
	 */
	u64				type;
	u64				ip;
	struct {
		u32	pid;
		u32	tid;
	}				tid_entry;
	u64				time;
	u64				id;
	u64				stream_id;
	struct {
		u32	cpu;
		u32	reserved;
	}				cpu_entry;
	struct perf_callchain_entry	*callchain;

	/*
	 * regs_user may point to task_pt_regs or to regs_user_copy, depending
	 * on arch details.
	 */
	struct perf_regs		regs_user;
	struct pt_regs			regs_user_copy;

	struct perf_regs		regs_intr;
	u64				stack_user_size;
} ____cacheline_aligned;

/* default value for data source */
#define PERF_MEM_NA (PERF_MEM_S(OP, NA)   |\
		    PERF_MEM_S(LVL, NA)   |\
		    PERF_MEM_S(SNOOP, NA) |\
		    PERF_MEM_S(LOCK, NA)  |\
		    PERF_MEM_S(TLB, NA))

static inline void perf_sample_data_init(struct perf_sample_data *data,
					 u64 addr, u64 period)
{
	/* remaining struct members initialized in perf_prepare_sample() */
	data->addr = addr;
	data->raw  = NULL;
	data->br_stack = NULL;
	data->period = period;
	data->weight = 0;
	data->data_src.val = PERF_MEM_NA;
	data->txn = 0;
}

extern void perf_output_sample(struct perf_output_handle *handle,
			       struct perf_event_header *header,
			       struct perf_sample_data *data,
			       struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event,
				struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event,
				 struct perf_sample_data *data,
				 struct pt_regs *regs);
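
/*
 * Illustrative sketch of the common PMI-handler pattern: initialize the
 * sample data, then let perf_event_overflow() decide whether the event
 * must be stopped (regs come from the interrupt context):
 *
 *	struct perf_sample_data data;
 *
 *	perf_sample_data_init(&data, 0, event->hw.last_period);
 *	if (perf_event_overflow(event, &data, regs))
 *		event->pmu->stop(event, 0);
 */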

static inline bool is_sampling_event(struct perf_event *event)
{
	return event->attr.sample_period != 0;
}

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
	return event->pmu->task_ctx_nr == perf_sw_context;
}

extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * Take a snapshot of the regs. Skip ip and frame pointer to
 * the nth caller. We only need a few of the regs:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - bp for callchains
 * - eflags, for future purposes, just in case
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(*regs));

	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
	if (static_key_false(&perf_swevent_enabled[event_id]))
		__perf_sw_event(event_id, nr, regs, addr);
}
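
/*
 * Illustrative sketch: this is how e.g. the fault path counts page faults
 * as a software event (regs and address come from the fault context):
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 */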

DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);

/*
 * 'Special' version for the scheduler; it hard-assumes no recursion,
 * which is guaranteed by us not actually scheduling inside other swevents
 * because those disable preemption.
 */
static __always_inline void
perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
{
	if (static_key_false(&perf_swevent_enabled[event_id])) {
		struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);

		perf_fetch_caller_regs(regs);
		___perf_sw_event(event_id, nr, regs, addr);
	}
}

extern struct static_key_deferred perf_sched_events;

static inline void perf_event_task_sched_in(struct task_struct *prev,
					    struct task_struct *task)
{
	if (static_key_false(&perf_sched_events.key))
		__perf_event_task_sched_in(prev, task);
}

static inline void perf_event_task_sched_out(struct task_struct *prev,
					     struct task_struct *next)
{
	perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);

	if (static_key_false(&perf_sched_events.key))
		__perf_event_task_sched_out(prev, next);
}

static inline u64 __perf_event_count(struct perf_event *event)
{
	return local64_read(&event->count) + atomic64_read(&event->child_count);
}

extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_exec(void);
extern void perf_event_comm(struct task_struct *tsk, bool exec);
extern void perf_event_fork(struct task_struct *tsk);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);

static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}
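
/*
 * Illustrative sketch: an architecture's kernel callchain hook feeds one
 * frame at a time to perf_callchain_store() while unwinding the stack
 * (my_unwind_next is a hypothetical arch-specific unwinder):
 *
 *	void perf_callchain_kernel(struct perf_callchain_entry *entry,
 *				   struct pt_regs *regs)
 *	{
 *		unsigned long ip = instruction_pointer(regs);
 *
 *		do {
 *			perf_callchain_store(entry, ip);
 *		} while (my_unwind_next(&ip));
 *	}
 */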

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;
extern int sysctl_perf_cpu_time_max_percent;

extern void perf_sample_event_took(u64 sample_len_ns);

extern int perf_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);
extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);

static inline bool perf_paranoid_tracepoint_raw(void)
{
	return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
	return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
	return sysctl_perf_event_paranoid > 1;
}

extern void perf_event_init(void);
extern void perf_tp_event(u64 addr, u64 count, void *record,
			  int entry_size, struct pt_regs *regs,
			  struct hlist_head *head, int rctx,
			  struct task_struct *task);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
# define perf_misc_flags(regs) \
		(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs)	instruction_pointer(regs)
#endif

static inline bool has_branch_stack(struct perf_event *event)
{
	return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}

static inline bool needs_branch_stack(struct perf_event *event)
{
	return event->attr.branch_sample_type != 0;
}

static inline bool has_aux(struct perf_event *event)
{
	return event->pmu->setup_aux;
}

extern int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_event *event, unsigned int size);
extern void perf_output_end(struct perf_output_handle *handle);
extern unsigned int perf_output_copy(struct perf_output_handle *handle,
			     const void *buf, unsigned int len);
extern unsigned int perf_output_skip(struct perf_output_handle *handle,
				     unsigned int len);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern u64 perf_swevent_set_period(struct perf_event *event);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern int __perf_event_disable(void *info);
extern void perf_event_task_tick(void);
#else /* !CONFIG_PERF_EVENTS: */
static inline void *
perf_aux_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event)				{ return NULL; }
static inline void
perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
		    bool truncated)					{ }
static inline int
perf_aux_output_skip(struct perf_output_handle *handle,
		     unsigned long size)				{ return -EINVAL; }
static inline void *
perf_get_aux(struct perf_output_handle *handle)				{ return NULL; }
static inline void
perf_event_task_sched_in(struct task_struct *prev,
			 struct task_struct *task)			{ }
static inline void
perf_event_task_sched_out(struct task_struct *prev,
			  struct task_struct *next)			{ }
static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
static inline void perf_event_exit_task(struct task_struct *child)	{ }
static inline void perf_event_free_task(struct task_struct *task)	{ }
static inline void perf_event_delayed_put(struct task_struct *task)	{ }
static inline void perf_event_print_debug(void)				{ }
static inline int perf_event_task_disable(void)				{ return -EINVAL; }
static inline int perf_event_task_enable(void)				{ return -EINVAL; }
static inline int perf_event_refresh(struct perf_event *event, int refresh)
{
	return -EINVAL;
}

static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)	{ }
static inline void
perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)			{ }
static inline void
perf_bp_event(struct perf_event *event, void *data)			{ }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
static inline void perf_event_exec(void)				{ }
static inline void perf_event_comm(struct task_struct *tsk, bool exec)	{ }
static inline void perf_event_fork(struct task_struct *tsk)		{ }
static inline void perf_event_init(void)				{ }
static inline int  perf_swevent_get_recursion_context(void)		{ return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)		{ }
static inline u64 perf_swevent_set_period(struct perf_event *event)	{ return 0; }
static inline void perf_event_enable(struct perf_event *event)		{ }
static inline void perf_event_disable(struct perf_event *event)		{ }
static inline int __perf_event_disable(void *info)			{ return -1; }
static inline void perf_event_task_tick(void)				{ }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_NO_HZ_FULL)
extern bool perf_event_can_stop_tick(void);
#else
static inline bool perf_event_can_stop_tick(void)			{ return true; }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern void perf_restore_debug_store(void);
#else
static inline void perf_restore_debug_store(void)			{ }
#endif

#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))

/*
 * This has to have a higher priority than migration_notifier in sched/core.c.
 */
#define perf_cpu_notifier(fn)						\
do {									\
	static struct notifier_block fn##_nb =				\
		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
	unsigned long cpu = smp_processor_id();				\
	unsigned long flags;						\
									\
	cpu_notifier_register_begin();					\
	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,			\
		(void *)(unsigned long)cpu);				\
	local_irq_save(flags);						\
	fn(&fn##_nb, (unsigned long)CPU_STARTING,			\
		(void *)(unsigned long)cpu);				\
	local_irq_restore(flags);					\
	fn(&fn##_nb, (unsigned long)CPU_ONLINE,				\
		(void *)(unsigned long)cpu);				\
	__register_cpu_notifier(&fn##_nb);				\
	cpu_notifier_register_done();					\
} while (0)
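
/*
 * Illustrative usage (my_cpu_notifier is a hypothetical callback using the
 * usual notifier_block convention):
 *
 *	static int my_cpu_notifier(struct notifier_block *nb,
 *				   unsigned long action, void *hcpu)
 *	{
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_ONLINE:
 *			...
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	perf_cpu_notifier(my_cpu_notifier);
 */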

/*
 * Bare-bones version of perf_cpu_notifier(), which doesn't invoke the
 * callback for already online CPUs.
 */
#define __perf_cpu_notifier(fn)						\
do {									\
	static struct notifier_block fn##_nb =				\
		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
									\
	__register_cpu_notifier(&fn##_nb);				\
} while (0)

struct perf_pmu_events_attr {
	struct device_attribute attr;
	u64 id;
	const char *event_str;
};

ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
			      char *page);

#define PMU_EVENT_ATTR(_name, _var, _id, _show)				\
static struct perf_pmu_events_attr _var = {				\
	.attr = __ATTR(_name, 0444, _show, NULL),			\
	.id   =  _id,							\
};

#define PMU_EVENT_ATTR_STRING(_name, _var, _str)			    \
static struct perf_pmu_events_attr _var = {				    \
	.attr		= __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
	.id		= 0,						    \
	.event_str	= _str,						    \
};

#define PMU_FORMAT_ATTR(_name, _format)					\
static ssize_t								\
_name##_show(struct device *dev,					\
			       struct device_attribute *attr,		\
			       char *page)				\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
									\
static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
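
/*
 * Illustrative sketch: exposing an "event" config field in sysfs via
 * PMU_FORMAT_ATTR() and hooking it into pmu::attr_groups (my_* names are
 * hypothetical; the string uses the "config:<first bit>-<last bit>" syntax):
 *
 *	PMU_FORMAT_ATTR(event, "config:0-7");
 *
 *	static struct attribute *my_format_attrs[] = {
 *		&format_attr_event.attr,
 *		NULL,
 *	};
 *
 *	static struct attribute_group my_format_group = {
 *		.name	= "format",
 *		.attrs	= my_format_attrs,
 *	};
 */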

#endif /* _LINUX_PERF_EVENT_H */