/*
 * x86 performance event (PMU) register, MSR and CPUID definitions.
 */
#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H
/*
 * Performance event hw details:
 */

/* Maximum number of general-purpose and fixed-function counters modelled: */
#define X86_PMC_MAX_GENERIC				       32
#define X86_PMC_MAX_FIXED					3

/*
 * Index layout in the global 64-bit counter bitmask: generic counters
 * occupy indices [0..31], fixed counters start at 32.
 */
#define X86_PMC_IDX_GENERIC				        0
#define X86_PMC_IDX_FIXED				       32
#define X86_PMC_IDX_MAX					       64
/* Architectural performance counter MSRs (IA32_PMC0/1): */
#define MSR_ARCH_PERFMON_PERFCTR0			      0xc1
#define MSR_ARCH_PERFMON_PERFCTR1			      0xc2

/* Architectural event-select MSRs (IA32_PERFEVTSEL0/1): */
#define MSR_ARCH_PERFMON_EVENTSEL0			     0x186
#define MSR_ARCH_PERFMON_EVENTSEL1			     0x187

/* EVENTSEL control bits: */
#define ARCH_PERFMON_EVENTSEL_ENABLE			  (1 << 22)
#define ARCH_PERFMON_EVENTSEL_ANY			  (1 << 21)
#define ARCH_PERFMON_EVENTSEL_INT			  (1 << 20)
#define ARCH_PERFMON_EVENTSEL_OS			  (1 << 17)
#define ARCH_PERFMON_EVENTSEL_USR			  (1 << 16)
/*
 * Field masks for the 64-bit event-select register layout.
 * Includes eventsel and unit mask as well:
 */
#define INTEL_ARCH_EVTSEL_MASK		0x000000FFULL
#define INTEL_ARCH_UNIT_MASK		0x0000FF00ULL
#define INTEL_ARCH_EDGE_MASK		0x00040000ULL
#define INTEL_ARCH_INV_MASK		0x00800000ULL
#define INTEL_ARCH_CNT_MASK		0xFF000000ULL
#define INTEL_ARCH_EVENT_MASK	(INTEL_ARCH_UNIT_MASK|INTEL_ARCH_EVTSEL_MASK)

/*
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  The other filters are supported by fixed counters.
 *  The any-thread option is supported starting with v3.
 */
#define INTEL_ARCH_FIXED_MASK \
	(INTEL_ARCH_CNT_MASK| \
	 INTEL_ARCH_INV_MASK| \
	 INTEL_ARCH_EDGE_MASK|\
	 INTEL_ARCH_UNIT_MASK|\
	 INTEL_ARCH_EVENT_MASK)
/*
 * Architectural "UnHalted Core Cycles" event: event select 0x3c, umask 0.
 * INDEX is its bit position in the CPUID.0AH:EBX availability vector
 * (NOTE(review): bit semantics assumed from the PRESENT macro name —
 * verify against the Intel SDM).
 */
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		      0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX			 0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED			 6
/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */

/* CPUID leaf 0xA, EAX: PMU version and general-counter geometry. */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;	/* architectural PMU version */
		unsigned int num_counters:8;	/* # general-purpose counters */
		unsigned int bit_width:8;	/* counter width in bits */
		unsigned int mask_length:8;	/* length of EBX event-availability vector */
	} split;
	unsigned int full;			/* raw 32-bit register value */
};
/* CPUID leaf 0xA, EDX: fixed-function counter enumeration. */
union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:4;	/* # fixed-function counters */
		unsigned int reserved:28;
	} split;
	unsigned int full;				/* raw 32-bit register value */
};

/*
87
 * Fixed-purpose performance events:
88 89
 */

90 91 92 93 94 95 96 97 98
/*
 * All 3 fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL			0x38d

/*
 * The counts are available in three separate MSRs:
 */

99 100
/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0			0x309
101
#define X86_PMC_IDX_FIXED_INSTRUCTIONS			(X86_PMC_IDX_FIXED + 0)
102 103 104

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1			0x30a
105
#define X86_PMC_IDX_FIXED_CPU_CYCLES			(X86_PMC_IDX_FIXED + 1)
106 107 108

/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2			0x30b
109
#define X86_PMC_IDX_FIXED_BUS_CYCLES			(X86_PMC_IDX_FIXED + 2)
/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose a value in the middle of the fixed event range, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define X86_PMC_IDX_FIXED_BTS				(X86_PMC_IDX_FIXED + 16)
/* IbsFetchCtl bits/masks (AMD Instruction-Based Sampling, fetch side): */
#define IBS_FETCH_RAND_EN		(1ULL<<57)
#define IBS_FETCH_VAL			(1ULL<<49)
#define IBS_FETCH_ENABLE		(1ULL<<48)
#define IBS_FETCH_CNT			0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT		0x0000FFFFULL

/* IbsOpCtl bits (AMD Instruction-Based Sampling, op side): */
#define IBS_OP_CNT_CTL			(1ULL<<19)
#define IBS_OP_VAL			(1ULL<<18)
#define IBS_OP_ENABLE			(1ULL<<17)
#define IBS_OP_MAX_CNT			0x0000FFFFULL
#ifdef CONFIG_PERF_EVENTS
/* Arch hooks provided when perf events are compiled in: */
extern void init_hw_perf_events(void);
extern void perf_events_lapic_init(void);

#define PERF_EVENT_INDEX_OFFSET			0

/*
 * Abuse bit 3 of the cpu eflags register to indicate proper PEBS IP fixups.
 * This flag is otherwise unused and ABI specified to be 0, so nobody should
 * care what we do with it.
 */
#define PERF_EFLAGS_EXACT	(1UL << 3)

/*
 * Classify a sample from @regs as user/kernel, and mark it "exact"
 * when the PEBS IP-fixup flag is set in regs->flags.
 */
#define perf_misc_flags(regs)				\
({	int misc = 0;					\
	if (user_mode(regs))				\
		misc |= PERF_RECORD_MISC_USER;		\
	else						\
		misc |= PERF_RECORD_MISC_KERNEL;	\
	if (regs->flags & PERF_EFLAGS_EXACT)		\
		misc |= PERF_RECORD_MISC_EXACT;		\
	misc; })

#define perf_instruction_pointer(regs)	((regs)->ip)

#else
/* Perf events disabled: hooks compile to no-ops. */
static inline void init_hw_perf_events(void)		{ }
static inline void perf_events_lapic_init(void)	{ }
#endif

#endif /* _ASM_X86_PERF_EVENT_H */