#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

/*
 * Performance event hw details:
 */

#define X86_PMC_MAX_GENERIC					8
#define X86_PMC_MAX_FIXED					3

#define X86_PMC_IDX_GENERIC				        0
#define X86_PMC_IDX_FIXED				       32
#define X86_PMC_IDX_MAX					       64

#define MSR_ARCH_PERFMON_PERFCTR0			      0xc1
#define MSR_ARCH_PERFMON_PERFCTR1			      0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0			     0x186
#define MSR_ARCH_PERFMON_EVENTSEL1			     0x187

#define ARCH_PERFMON_EVENTSEL0_ENABLE			  (1 << 22)
#define ARCH_PERFMON_EVENTSEL_INT			  (1 << 20)
#define ARCH_PERFMON_EVENTSEL_OS			  (1 << 17)
#define ARCH_PERFMON_EVENTSEL_USR			  (1 << 16)

/*
 * Includes eventsel and unit mask as well:
 */
#define ARCH_PERFMON_EVENT_MASK				    0xffff

/*
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  The other filters are supported by fixed counters.
 *  The any-thread option is supported starting with v3.
 */
#define ARCH_PERFMON_EVENT_FILTER_MASK			0xff840000
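
/*
 * Example sketch (not part of this header): deciding whether an event
 * select value may be scheduled on a fixed counter.  The helper name and
 * the 0x00c0 event code (Instr_Retired.Any per the Intel SDM, the event
 * fixed counter 0 counts) are illustrative; the masks are the ones
 * defined above.  Events carrying inv, edge or cnt-mask filters must
 * fall back to a generic counter.
 *
 *	static int fits_fixed_ctr0(u64 config)
 *	{
 *		if (config & ARCH_PERFMON_EVENT_FILTER_MASK)
 *			return 0;
 *		return (config & ARCH_PERFMON_EVENT_MASK) == 0x00c0;
 *	}
 */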

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		      0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX			 0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED			 6
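
/*
 * Example sketch (not part of this header): programming generic counter 0
 * to count unhalted core cycles in user and kernel mode, with a PMI on
 * overflow.  Assumes kernel context with wrmsrl() from <asm/msr.h>; a
 * real driver would also reserve the counter and wire up the interrupt.
 *
 *	u64 config = ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL |
 *		     ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK |
 *		     ARCH_PERFMON_EVENTSEL_USR |
 *		     ARCH_PERFMON_EVENTSEL_OS |
 *		     ARCH_PERFMON_EVENTSEL_INT |
 *		     ARCH_PERFMON_EVENTSEL0_ENABLE;
 *
 *	wrmsrl(MSR_ARCH_PERFMON_PERFCTR0, 0);
 *	wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0, config);
 */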

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_events:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_events_fixed:4;
		unsigned int reserved:28;
	} split;
	unsigned int full;
};
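
/*
 * Example sketch (not part of this header): enumerating the PMU via CPUID
 * leaf 0xa using the unions above, as the perf_event init code does.
 * Assumes cpuid() from <asm/processor.h>.  Note that a *set* bit in EBX
 * means the corresponding architectural event is NOT available.
 *
 *	union cpuid10_eax eax;
 *	union cpuid10_edx edx;
 *	unsigned int ebx, ecx;
 *	int core_cycles_ok;
 *
 *	cpuid(0xa, &eax.full, &ebx, &ecx, &edx.full);
 *
 *	pr_info("PMU v%d: %d generic counters, %d bits wide, %d fixed\n",
 *		eax.split.version_id, eax.split.num_events,
 *		eax.split.bit_width, edx.split.num_events_fixed);
 *
 *	core_cycles_ok =
 *		eax.split.mask_length > ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX &&
 *		!(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT);
 */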


/*
 * Fixed-purpose performance events:
 */

/*
 * All 3 fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL			0x38d

/*
 * The counts are available in three separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0			0x309
#define X86_PMC_IDX_FIXED_INSTRUCTIONS			(X86_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1			0x30a
#define X86_PMC_IDX_FIXED_CPU_CYCLES			(X86_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2			0x30b
#define X86_PMC_IDX_FIXED_BUS_CYCLES			(X86_PMC_IDX_FIXED + 2)
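
/*
 * Example sketch (not part of this header): counting retired instructions
 * on fixed counter 0.  Each fixed counter owns a 4-bit field in
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL (bit 0: count in ring 0, bit 1: count
 * in rings above 0, bit 3: PMI on overflow), and must also be enabled in
 * the global control MSR via its X86_PMC_IDX_FIXED_* bit.  Assumes
 * rdmsrl()/wrmsrl() from <asm/msr.h> and MSR_CORE_PERF_GLOBAL_CTRL from
 * <asm/msr-index.h>; a real driver would read-modify-write both control
 * MSRs instead of clobbering them.
 *
 *	u64 count;
 *
 *	wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0, 0);
 *	wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, 0x3);
 *	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
 *	       1ULL << X86_PMC_IDX_FIXED_INSTRUCTIONS);
 *
 *	... run the code to be measured, then:
 *
 *	rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0, count);
 */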

/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose a value in the middle of the fixed event range, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define X86_PMC_IDX_FIXED_BTS				(X86_PMC_IDX_FIXED + 16)


#ifdef CONFIG_PERF_EVENTS
extern void init_hw_perf_events(void);
extern void perf_events_lapic_init(void);

#define PERF_EVENT_INDEX_OFFSET			0

#else
static inline void init_hw_perf_events(void)		{ }
static inline void perf_events_lapic_init(void)	{ }
#endif

#endif /* _ASM_X86_PERF_EVENT_H */