/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <uapi/linux/perf_event.h>

/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

struct perf_guest_info_callbacks {
	int				(*is_in_guest)(void);
	int				(*is_user_mode)(void);
	unsigned long			(*get_guest_ip)(void);
};

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
#include <linux/cgroup.h>
#include <asm/local.h>

struct perf_callchain_entry {
	__u64				nr;
	__u64				ip[PERF_MAX_STACK_DEPTH];
};

struct perf_raw_record {
	u32				size;
	void				*data;
};

/*
 * branch stack layout:
 *  nr: number of taken branches stored in entries[]
 *
 * Note that nr can vary from sample to sample.
 * Branches (to, from) are stored from most recent
 * to least recent, i.e., entries[0] contains the most
 * recent branch.
 */
struct perf_branch_stack {
	__u64				nr;
	struct perf_branch_entry	entries[0];
};

struct task_struct;

/*
 * extra PMU register associated with an event
 */
struct hw_perf_event_extra {
	u64		config;	/* register value */
	unsigned int	reg;	/* register address or index */
	int		alloc;	/* extra register already allocated */
	int		idx;	/* index in shared_regs->regs[] */
};

struct event_constraint;

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
	union {
		struct { /* hardware */
			u64		config;
			u64		last_tag;
			unsigned long	config_base;
			unsigned long	event_base;
			int		event_base_rdpmc;
			int		idx;
			int		last_cpu;
			int		flags;

			struct hw_perf_event_extra extra_reg;
			struct hw_perf_event_extra branch_reg;

			struct event_constraint *constraint;
		};
		struct { /* software */
			struct hrtimer	hrtimer;
		};
		struct { /* tracepoint */
			struct task_struct	*tp_target;
			/* for tp_event->class */
			struct list_head	tp_list;
		};
		struct { /* intel_cqm */
			int			cqm_state;
			int			cqm_rmid;
			struct list_head	cqm_events_entry;
			struct list_head	cqm_groups_entry;
			struct list_head	cqm_group_entry;
			struct task_struct	*cqm_target;
		};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		struct { /* breakpoint */
			/*
			 * Crufty hack to avoid the chicken and egg
			 * problem hw_breakpoint has with context
			 * creation and event initialization.
			 */
			struct task_struct		*bp_target;
			struct arch_hw_breakpoint	info;
			struct list_head		bp_list;
		};
#endif
	};
	int				state;
	local64_t			prev_count;
	u64				sample_period;
	u64				last_period;
	local64_t			period_left;
	u64				interrupts_seq;
	u64				interrupts;

	u64				freq_time_stamp;
	u64				freq_count_stamp;
#endif
};

/*
 * hw_perf_event::state flags
 */
#define PERF_HES_STOPPED	0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
#define PERF_HES_ARCH		0x04

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_EVENT_TXN 0x1

/**
 * pmu::capabilities flags
 */
#define PERF_PMU_CAP_NO_INTERRUPT		0x01

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	struct list_head		entry;

	struct module			*module;
	struct device			*dev;
	const struct attribute_group	**attr_groups;
	const char			*name;
	int				type;

	/*
	 * various common per-pmu feature flags
	 */
	int				capabilities;

	int * __percpu			pmu_disable_count;
	struct perf_cpu_context * __percpu pmu_cpu_context;
	int				task_ctx_nr;
	int				hrtimer_interval_ms;

	/*
	 * Fully disable/enable this PMU, can be used to protect from the PMI
	 * as well as for lazy/batch writing of the MSRs.
	 */
	void (*pmu_enable)		(struct pmu *pmu); /* optional */
	void (*pmu_disable)		(struct pmu *pmu); /* optional */

	/*
	 * Try and initialize the event for this PMU.
	 * Should return -ENOENT when the @event doesn't match this PMU.
	 */
	int (*event_init)		(struct perf_event *event);

	/*
	 * Notification that the event was mapped or unmapped.  Called
	 * in the context of the mapping task.
	 */
	void (*event_mapped)		(struct perf_event *event); /* optional */
	void (*event_unmapped)		(struct perf_event *event); /* optional */

#define PERF_EF_START	0x01		/* start the counter when adding    */
#define PERF_EF_RELOAD	0x02		/* reload the counter when starting */
#define PERF_EF_UPDATE	0x04		/* update the counter when stopping */

	/*
	 * Adds/Removes a counter to/from the PMU, can be done inside
	 * a transaction, see the ->*_txn() methods.
	 */
	int  (*add)			(struct perf_event *event, int flags);
	void (*del)			(struct perf_event *event, int flags);

	/*
	 * Starts/Stops a counter present on the PMU. The PMI handler
	 * should stop the counter when perf_event_overflow() returns
	 * !0. ->start() will be used to continue.
	 */
	void (*start)			(struct perf_event *event, int flags);
	void (*stop)			(struct perf_event *event, int flags);

	/*
	 * Updates the counter value of the event.
	 */
	void (*read)			(struct perf_event *event);

	/*
	 * Group event scheduling is treated as a transaction: add the
	 * group events as a whole and perform one schedulability test.
	 * If the test fails, roll back the whole group.
	 *
	 * Start the transaction, after this ->add() doesn't need to
	 * do schedulability tests.
	 */
	void (*start_txn)		(struct pmu *pmu); /* optional */
	/*
	 * If ->start_txn() disabled the ->add() schedulability test
	 * then ->commit_txn() is required to perform one. On success
	 * the transaction is closed. On error the transaction is kept
	 * open until ->cancel_txn() is called.
	 */
	int  (*commit_txn)		(struct pmu *pmu); /* optional */
	/*
	 * Will cancel the transaction, assumes ->del() is called
	 * for each successful ->add() during the transaction.
	 */
	void (*cancel_txn)		(struct pmu *pmu); /* optional */

	/*
	 * Will return the value for perf_event_mmap_page::index for this event,
	 * if no implementation is provided it will default to: event->hw.idx + 1.
	 */
	int (*event_idx)		(struct perf_event *event); /* optional */

	/*
	 * context-switches callback
	 */
	void (*sched_task)		(struct perf_event_context *ctx,
					bool sched_in);
	/*
	 * PMU specific data size
	 */
	size_t				task_ctx_size;

	/*
	 * Return the count value for a counter.
	 */
	u64 (*count)			(struct perf_event *event); /* optional */
};
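
/*
 * Illustrative sketch (not part of the mainline header): a minimal
 * software-style PMU wired up through the callbacks above.  Every name
 * prefixed "my_" is hypothetical; only perf_pmu_register() (declared
 * later in this header) and the struct pmu fields shown are real.
 *
 *	static void my_pmu_start(struct perf_event *event, int flags)
 *	{
 *		event->hw.state = 0;
 *	}
 *
 *	static void my_pmu_stop(struct perf_event *event, int flags)
 *	{
 *		event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
 *	}
 *
 *	static int my_pmu_add(struct perf_event *event, int flags)
 *	{
 *		if (flags & PERF_EF_START)
 *			my_pmu_start(event, PERF_EF_RELOAD);
 *		return 0;
 *	}
 *
 *	static void my_pmu_del(struct perf_event *event, int flags)
 *	{
 *		my_pmu_stop(event, PERF_EF_UPDATE);
 *	}
 *
 *	static void my_pmu_read(struct perf_event *event)
 *	{
 *		local64_set(&event->count, 0);	(would read the real counter)
 *	}
 *
 *	static int my_pmu_event_init(struct perf_event *event)
 *	{
 *		if (event->attr.type != event->pmu->type)
 *			return -ENOENT;		(not one of ours)
 *		return 0;
 *	}
 *
 *	static struct pmu my_pmu = {
 *		.task_ctx_nr	= perf_sw_context,
 *		.event_init	= my_pmu_event_init,
 *		.add		= my_pmu_add,
 *		.del		= my_pmu_del,
 *		.start		= my_pmu_start,
 *		.stop		= my_pmu_stop,
 *		.read		= my_pmu_read,
 *	};
 *
 *	(registered from driver init; -1 asks for a dynamic type id)
 *	perf_pmu_register(&my_pmu, "my_pmu", -1);
 */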

/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
	PERF_EVENT_STATE_EXIT		= -3,
	PERF_EVENT_STATE_ERROR		= -2,
	PERF_EVENT_STATE_OFF		= -1,
	PERF_EVENT_STATE_INACTIVE	=  0,
	PERF_EVENT_STATE_ACTIVE		=  1,
};

struct file;
struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *,
					struct perf_sample_data *,
					struct pt_regs *regs);

enum perf_group_flag {
	PERF_GROUP_SOFTWARE		= 0x1,
};

#define SWEVENT_HLIST_BITS		8
#define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
	struct hlist_head		heads[SWEVENT_HLIST_SIZE];
	struct rcu_head			rcu_head;
};

#define PERF_ATTACH_CONTEXT	0x01
#define PERF_ATTACH_GROUP	0x02
#define PERF_ATTACH_TASK	0x04
#define PERF_ATTACH_TASK_DATA	0x08

struct perf_cgroup;
struct ring_buffer;

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
	/*
	 * entry onto perf_event_context::event_list;
	 *   modifications require ctx->lock
	 *   RCU safe iterations.
	 */
	struct list_head		event_entry;

	/*
	 * XXX: group_entry and sibling_list should be mutually exclusive;
	 * either you're a sibling on a group, or you're the group leader.
	 * Rework the code to always use the same list element.
	 *
	 * Locked for modification by both ctx->mutex and ctx->lock; holding
	 * either suffices for read.
	 */
	struct list_head		group_entry;
	struct list_head		sibling_list;

	/*
	 * We need storage to track the entries in perf_pmu_migrate_context; we
	 * cannot use the event_entry because of RCU and we want to keep the
	 * group intact which avoids us using the other two entries.
	 */
	struct list_head		migrate_entry;

	struct hlist_node		hlist_entry;
	struct list_head		active_entry;
	int				nr_siblings;
	int				group_flags;
	struct perf_event		*group_leader;
	struct pmu			*pmu;

	enum perf_event_active_state	state;
	unsigned int			attach_state;
	local64_t			count;
	atomic64_t			child_count;

	/*
	 * These are the total time in nanoseconds that the event
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task event)
	 * and running (scheduled onto the CPU), respectively.
	 *
	 * They are computed from tstamp_enabled, tstamp_running and
	 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
	 */
	u64				total_time_enabled;
	u64				total_time_running;

	/*
	 * These are timestamps used for computing total_time_enabled
	 * and total_time_running when the event is in INACTIVE or
	 * ACTIVE state, measured in nanoseconds from an arbitrary point
	 * in time.
	 * tstamp_enabled: the notional time when the event was enabled
	 * tstamp_running: the notional time when the event was scheduled on
	 * tstamp_stopped: in INACTIVE state, the notional time when the
	 *	event was scheduled off.
	 */
	u64				tstamp_enabled;
	u64				tstamp_running;
	u64				tstamp_stopped;

	/*
	 * timestamp shadows the actual context timing but it can
	 * be safely used in NMI interrupt context. It reflects the
	 * context time as it was when the event was last scheduled in.
	 *
	 * ctx_time already accounts for ctx->timestamp. Therefore to
	 * compute ctx_time for a sample, simply add perf_clock().
	 */
	u64				shadow_ctx_time;

	struct perf_event_attr		attr;
	u16				header_size;
	u16				id_header_size;
	u16				read_size;
	struct hw_perf_event		hw;

	struct perf_event_context	*ctx;
	atomic_long_t			refcount;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * events have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			child_mutex;
	struct list_head		child_list;
	struct perf_event		*parent;

	int				oncpu;
	int				cpu;

	struct list_head		owner_entry;
	struct task_struct		*owner;

	/* mmap bits */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;

	struct ring_buffer		*rb;
	struct list_head		rb_entry;
	unsigned long			rcu_batches;
	int				rcu_pending;

	/* poll related */
	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	int				pending_wakeup;
	int				pending_kill;
	int				pending_disable;
	struct irq_work			pending;

	atomic_t			event_limit;

	void (*destroy)(struct perf_event *);
	struct rcu_head			rcu_head;

	struct pid_namespace		*ns;
	u64				id;

	perf_overflow_handler_t		overflow_handler;
	void				*overflow_handler_context;

#ifdef CONFIG_EVENT_TRACING
	struct ftrace_event_call	*tp_event;
	struct event_filter		*filter;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops		ftrace_ops;
#endif
#endif

#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup		*cgrp; /* cgroup the event is attached to */
	int				cgrp_defer_enabled;
#endif

#endif /* CONFIG_PERF_EVENTS */
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
	struct pmu			*pmu;
	/*
	 * Protect the states of the events in the list,
	 * nr_active, and the list:
	 */
	raw_spinlock_t			lock;
	/*
	 * Protect the list of events.  Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex			mutex;

	struct list_head		active_ctx_list;
	struct list_head		pinned_groups;
	struct list_head		flexible_groups;
	struct list_head		event_list;
	int				nr_events;
	int				nr_active;
	int				is_active;
	int				nr_stat;
	int				nr_freq;
	int				rotate_disable;
	atomic_t			refcount;
	struct task_struct		*task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64				time;
	u64				timestamp;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_event_context	*parent_ctx;
	u64				parent_gen;
	u64				generation;
	int				pin_count;
	int				nr_cgroups;	 /* cgroup evts */
	void				*task_ctx_data; /* pmu specific data */
	struct rcu_head			rcu_head;

	struct delayed_work		orphans_remove;
	bool				orphans_remove_sched;
};

/*
 * Number of contexts where an event can trigger:
 *	task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS	4

/**
 * struct perf_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
	struct perf_event_context	ctx;
	struct perf_event_context	*task_ctx;
	int				active_oncpu;
	int				exclusive;
	struct hrtimer			hrtimer;
	ktime_t				hrtimer_interval;
	struct pmu			*unique_pmu;
	struct perf_cgroup		*cgrp;
};

struct perf_output_handle {
	struct perf_event		*event;
	struct ring_buffer		*rb;
	unsigned long			wakeup;
	unsigned long			size;
	void				*addr;
	int				page;
};

#ifdef CONFIG_CGROUP_PERF

/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */
struct perf_cgroup_info {
	u64				time;
	u64				timestamp;
};

struct perf_cgroup {
	struct cgroup_subsys_state	css;
	struct perf_cgroup_info	__percpu *info;
};

/*
 * Must ensure cgroup is pinned (css_get) before calling
 * this function. In other words, we cannot call this function
 * if there is no cgroup event for the current CPU context.
 */
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task)
{
	return container_of(task_css(task, perf_event_cgrp_id),
			    struct perf_cgroup, css);
}
#endif /* CONFIG_CGROUP_PERF */

#ifdef CONFIG_PERF_EVENTS

extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *prev,
				       struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
					struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern void perf_sched_cb_dec(struct pmu *pmu);
extern void perf_sched_cb_inc(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
				int cpu,
				struct task_struct *task,
				perf_overflow_handler_t callback,
				void *context);
extern void perf_pmu_migrate_context(struct pmu *pmu,
				int src_cpu, int dst_cpu);
extern u64 perf_event_read_value(struct perf_event *event,
				 u64 *enabled, u64 *running);
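
/*
 * Illustrative sketch (not in the mainline header): opening a kernel-side
 * counter with the API above, in the style of the NMI watchdog.  The
 * attribute values and the "watchdog_" names are hypothetical; only the
 * perf_event_* calls declared in this header are real.
 *
 *	static struct perf_event *watchdog_ev;
 *
 *	static int watchdog_start(int cpu)
 *	{
 *		struct perf_event_attr attr = {
 *			.type		= PERF_TYPE_HARDWARE,
 *			.config		= PERF_COUNT_HW_CPU_CYCLES,
 *			.size		= sizeof(attr),
 *			.pinned		= 1,
 *			.disabled	= 1,
 *		};
 *
 *		watchdog_ev = perf_event_create_kernel_counter(&attr, cpu,
 *							       NULL, NULL, NULL);
 *		if (IS_ERR(watchdog_ev))
 *			return PTR_ERR(watchdog_ev);
 *
 *		perf_event_enable(watchdog_ev);
 *		return 0;
 *	}
 *
 *	static u64 watchdog_read(void)
 *	{
 *		u64 enabled, running;
 *
 *		return perf_event_read_value(watchdog_ev, &enabled, &running);
 *	}
 *
 *	static void watchdog_stop(void)
 *	{
 *		perf_event_disable(watchdog_ev);
 *		perf_event_release_kernel(watchdog_ev);
 *	}
 */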

struct perf_sample_data {
	/*
	 * Fields set by perf_sample_data_init(), group so as to
	 * minimize the cachelines touched.
	 */
	u64				addr;
	struct perf_raw_record		*raw;
	struct perf_branch_stack	*br_stack;
	u64				period;
	u64				weight;
	u64				txn;
	union  perf_mem_data_src	data_src;

	/*
	 * The other fields, optionally {set,used} by
	 * perf_{prepare,output}_sample().
	 */
	u64				type;
	u64				ip;
	struct {
		u32	pid;
		u32	tid;
	}				tid_entry;
	u64				time;
	u64				id;
	u64				stream_id;
	struct {
		u32	cpu;
		u32	reserved;
	}				cpu_entry;
	struct perf_callchain_entry	*callchain;

	/*
	 * regs_user may point to task_pt_regs or to regs_user_copy, depending
	 * on arch details.
	 */
	struct perf_regs		regs_user;
	struct pt_regs			regs_user_copy;

	struct perf_regs		regs_intr;
	u64				stack_user_size;
} ____cacheline_aligned;

/* default value for data source */
#define PERF_MEM_NA (PERF_MEM_S(OP, NA)   |\
		    PERF_MEM_S(LVL, NA)   |\
		    PERF_MEM_S(SNOOP, NA) |\
		    PERF_MEM_S(LOCK, NA)  |\
		    PERF_MEM_S(TLB, NA))

static inline void perf_sample_data_init(struct perf_sample_data *data,
					 u64 addr, u64 period)
{
	/* remaining struct members initialized in perf_prepare_sample() */
	data->addr = addr;
	data->raw  = NULL;
	data->br_stack = NULL;
	data->period = period;
	data->weight = 0;
	data->data_src.val = PERF_MEM_NA;
	data->txn = 0;
}

extern void perf_output_sample(struct perf_output_handle *handle,
			       struct perf_event_header *header,
			       struct perf_sample_data *data,
			       struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event,
				struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event,
				 struct perf_sample_data *data,
				 struct pt_regs *regs);
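
/*
 * Illustrative sketch (not in the mainline header): the usual shape of a
 * PMU interrupt handler feeding perf_sample_data_init() and
 * perf_event_overflow().  "my_pmu_handle_irq" and "my_pmu_stop" are
 * hypothetical; the perf_* calls are the ones declared above.
 *
 *	static void my_pmu_handle_irq(struct perf_event *event,
 *				      struct pt_regs *regs)
 *	{
 *		struct perf_sample_data data;
 *
 *		perf_sample_data_init(&data, 0, event->hw.last_period);
 *
 *		(a non-zero return asks us to throttle the counter)
 *		if (perf_event_overflow(event, &data, regs))
 *			my_pmu_stop(event, 0);
 *	}
 */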

static inline bool is_sampling_event(struct perf_event *event)
{
	return event->attr.sample_period != 0;
}

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
	return event->pmu->task_ctx_nr == perf_sw_context;
}

extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * Take a snapshot of the regs. Skip ip and frame pointer to
 * the nth caller. We only need a few of the regs:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - bp for callchains
 * - eflags, for future purposes, just in case
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(*regs));

	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
	if (static_key_false(&perf_swevent_enabled[event_id]))
		__perf_sw_event(event_id, nr, regs, addr);
}
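
/*
 * Illustrative sketch (not in the mainline header): software events are
 * counted from the spot where the kernel-level event actually happens,
 * e.g. from a fault path:
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 *
 * The static key above keeps this a patched-out branch while no software
 * event of that type is in use.
 */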

DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);

/*
 * 'Special' version for the scheduler, it hard assumes no recursion,
 * which is guaranteed by us not actually scheduling inside other swevents
 * because those disable preemption.
 */
static __always_inline void
perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
{
	if (static_key_false(&perf_swevent_enabled[event_id])) {
		struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);

		perf_fetch_caller_regs(regs);
		___perf_sw_event(event_id, nr, regs, addr);
	}
}

extern struct static_key_deferred perf_sched_events;

static inline void perf_event_task_sched_in(struct task_struct *prev,
					    struct task_struct *task)
{
	if (static_key_false(&perf_sched_events.key))
		__perf_event_task_sched_in(prev, task);
}

static inline void perf_event_task_sched_out(struct task_struct *prev,
					     struct task_struct *next)
{
	perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);

	if (static_key_false(&perf_sched_events.key))
		__perf_event_task_sched_out(prev, next);
}

static inline u64 __perf_event_count(struct perf_event *event)
{
	return local64_read(&event->count) + atomic64_read(&event->child_count);
}

extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_exec(void);
extern void perf_event_comm(struct task_struct *tsk, bool exec);
extern void perf_event_fork(struct task_struct *tsk);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);

static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;
extern int sysctl_perf_cpu_time_max_percent;

extern void perf_sample_event_took(u64 sample_len_ns);

extern int perf_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);
extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);

static inline bool perf_paranoid_tracepoint_raw(void)
{
	return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
	return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
	return sysctl_perf_event_paranoid > 1;
}

extern void perf_event_init(void);
extern void perf_tp_event(u64 addr, u64 count, void *record,
			  int entry_size, struct pt_regs *regs,
			  struct hlist_head *head, int rctx,
			  struct task_struct *task);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
# define perf_misc_flags(regs) \
		(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs)	instruction_pointer(regs)
#endif

static inline bool has_branch_stack(struct perf_event *event)
{
	return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}

static inline bool needs_branch_stack(struct perf_event *event)
{
	return event->attr.branch_sample_type != 0;
}

extern int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_event *event, unsigned int size);
extern void perf_output_end(struct perf_output_handle *handle);
extern unsigned int perf_output_copy(struct perf_output_handle *handle,
			     const void *buf, unsigned int len);
extern unsigned int perf_output_skip(struct perf_output_handle *handle,
				     unsigned int len);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern u64 perf_swevent_set_period(struct perf_event *event);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern int __perf_event_disable(void *info);
extern void perf_event_task_tick(void);
#else /* !CONFIG_PERF_EVENTS: */
static inline void
perf_event_task_sched_in(struct task_struct *prev,
			 struct task_struct *task)			{ }
static inline void
perf_event_task_sched_out(struct task_struct *prev,
			  struct task_struct *next)			{ }
static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
static inline void perf_event_exit_task(struct task_struct *child)	{ }
static inline void perf_event_free_task(struct task_struct *task)	{ }
static inline void perf_event_delayed_put(struct task_struct *task)	{ }
static inline void perf_event_print_debug(void)				{ }
static inline int perf_event_task_disable(void)				{ return -EINVAL; }
static inline int perf_event_task_enable(void)				{ return -EINVAL; }
static inline int perf_event_refresh(struct perf_event *event, int refresh)
{
	return -EINVAL;
}

static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)	{ }
static inline void
perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)			{ }
static inline void
perf_bp_event(struct perf_event *event, void *data)			{ }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
static inline void perf_event_exec(void)				{ }
static inline void perf_event_comm(struct task_struct *tsk, bool exec)	{ }
static inline void perf_event_fork(struct task_struct *tsk)		{ }
static inline void perf_event_init(void)				{ }
static inline int  perf_swevent_get_recursion_context(void)		{ return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)		{ }
static inline u64 perf_swevent_set_period(struct perf_event *event)	{ return 0; }
static inline void perf_event_enable(struct perf_event *event)		{ }
static inline void perf_event_disable(struct perf_event *event)		{ }
static inline int __perf_event_disable(void *info)			{ return -1; }
static inline void perf_event_task_tick(void)				{ }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_NO_HZ_FULL)
extern bool perf_event_can_stop_tick(void);
#else
static inline bool perf_event_can_stop_tick(void)			{ return true; }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern void perf_restore_debug_store(void);
#else
static inline void perf_restore_debug_store(void)			{ }
#endif

#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
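
/*
 * Illustrative sketch (not in the mainline header): emitting a record into
 * an event's ring buffer with the output API above.  The record layout,
 * the type value and "my_write_record" are hypothetical; the
 * perf_output_* calls are the ones declared in this header.
 *
 *	static void my_write_record(struct perf_event *event, u64 payload)
 *	{
 *		struct perf_output_handle handle;
 *		struct perf_event_header header = {
 *			.type = PERF_RECORD_MAX,	(example type only)
 *			.size = sizeof(header) + sizeof(payload),
 *		};
 *
 *		if (perf_output_begin(&handle, event, header.size))
 *			return;				(buffer full or inactive)
 *
 *		perf_output_put(&handle, header);
 *		perf_output_put(&handle, payload);
 *		perf_output_end(&handle);
 *	}
 */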

/*
 * This has to have a higher priority than migration_notifier in sched/core.c.
 */
#define perf_cpu_notifier(fn)						\
do {									\
	static struct notifier_block fn##_nb =				\
		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
	unsigned long cpu = smp_processor_id();				\
	unsigned long flags;						\
									\
	cpu_notifier_register_begin();					\
	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,			\
		(void *)(unsigned long)cpu);				\
	local_irq_save(flags);						\
	fn(&fn##_nb, (unsigned long)CPU_STARTING,			\
		(void *)(unsigned long)cpu);				\
	local_irq_restore(flags);					\
	fn(&fn##_nb, (unsigned long)CPU_ONLINE,				\
		(void *)(unsigned long)cpu);				\
	__register_cpu_notifier(&fn##_nb);				\
	cpu_notifier_register_done();					\
} while (0)

/*
 * Bare-bones version of perf_cpu_notifier(), which doesn't invoke the
 * callback for already online CPUs.
 */
#define __perf_cpu_notifier(fn)						\
do {									\
	static struct notifier_block fn##_nb =				\
		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
									\
	__register_cpu_notifier(&fn##_nb);				\
} while (0)
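
/*
 * Illustrative sketch (not in the mainline header): how a PMU driver would
 * typically hook CPU hotplug with the helpers above.  The
 * "my_pmu_cpu_notifier" callback is hypothetical.
 *
 *	static int my_pmu_cpu_notifier(struct notifier_block *self,
 *				       unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_UP_PREPARE:
 *			(allocate per-cpu state for cpu)
 *			break;
 *		case CPU_ONLINE:
 *			(start using the counters on cpu)
 *			break;
 *		default:
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	(from the driver's init function)
 *	perf_cpu_notifier(my_pmu_cpu_notifier);
 */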

struct perf_pmu_events_attr {
	struct device_attribute attr;
	u64 id;
	const char *event_str;
};

#define PMU_EVENT_ATTR(_name, _var, _id, _show)				\
static struct perf_pmu_events_attr _var = {				\
	.attr = __ATTR(_name, 0444, _show, NULL),			\
	.id   =  _id,							\
};

#define PMU_FORMAT_ATTR(_name, _format)					\
static ssize_t								\
_name##_show(struct device *dev,					\
			       struct device_attribute *attr,		\
			       char *page)				\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
									\
static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
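
/*
 * Illustrative sketch (not in the mainline header): how a driver exposes
 * its event encoding through the helpers above and hangs the result off
 * pmu::attr_groups.  The "config:0-7" format string and the "my_*" names
 * are hypothetical.
 *
 *	PMU_FORMAT_ATTR(event, "config:0-7");
 *
 *	static struct attribute *my_pmu_format_attrs[] = {
 *		&format_attr_event.attr,
 *		NULL,
 *	};
 *
 *	static struct attribute_group my_pmu_format_group = {
 *		.name	= "format",
 *		.attrs	= my_pmu_format_attrs,
 *	};
 *
 *	static const struct attribute_group *my_pmu_attr_groups[] = {
 *		&my_pmu_format_group,
 *		NULL,
 *	};
 *
 *	(then, before perf_pmu_register())
 *	my_pmu.attr_groups = my_pmu_attr_groups;
 */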

#endif /* _LINUX_PERF_EVENT_H */