#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

/*
 * Performance event hw details:
 */

#define INTEL_PMC_MAX_GENERIC				       32
#define INTEL_PMC_MAX_FIXED					3
#define INTEL_PMC_IDX_FIXED				       32

#define X86_PMC_IDX_MAX					       64

#define MSR_ARCH_PERFMON_PERFCTR0			      0xc1
#define MSR_ARCH_PERFMON_PERFCTR1			      0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0			     0x186
#define MSR_ARCH_PERFMON_EVENTSEL1			     0x187

#define ARCH_PERFMON_EVENTSEL_EVENT			0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK			0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR			(1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS			(1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE			(1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL		(1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT			(1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY			(1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE			(1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV			(1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK			0xFF000000ULL
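
/*
 * Illustrative sketch (not compiled; a hypothetical helper, assuming
 * <asm/msr.h> for wrmsrl()): a raw event select value is built by
 * OR-ing the fields above. Counting the architectural
 * instructions-retired event (event 0xc0, umask 0x00) in user and
 * kernel mode, with the counter enabled and a PMI on overflow:
 */
#if 0
static inline void example_count_insns_retired(void)
{
	u64 config = (0xc0ULL & ARCH_PERFMON_EVENTSEL_EVENT) |
		     ((0x00ULL << 8) & ARCH_PERFMON_EVENTSEL_UMASK) |
		     ARCH_PERFMON_EVENTSEL_USR |
		     ARCH_PERFMON_EVENTSEL_OS  |
		     ARCH_PERFMON_EVENTSEL_INT |
		     ARCH_PERFMON_EVENTSEL_ENABLE;

	wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0, config);
}
#endif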

#define HSW_IN_TX					(1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED				(1ULL << 33)

#define AMD64_EVENTSEL_INT_CORE_ENABLE			(1ULL << 36)
#define AMD64_EVENTSEL_GUESTONLY			(1ULL << 40)
#define AMD64_EVENTSEL_HOSTONLY				(1ULL << 41)

#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT		37
#define AMD64_EVENTSEL_INT_CORE_SEL_MASK		\
	(0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)

#define AMD64_EVENTSEL_EVENT	\
	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK	\
	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define X86_RAW_EVENT_MASK		\
	(ARCH_PERFMON_EVENTSEL_EVENT |	\
	 ARCH_PERFMON_EVENTSEL_UMASK |	\
	 ARCH_PERFMON_EVENTSEL_EDGE  |	\
	 ARCH_PERFMON_EVENTSEL_INV   |	\
	 ARCH_PERFMON_EVENTSEL_CMASK)
#define X86_ALL_EVENT_FLAGS  			\
	(ARCH_PERFMON_EVENTSEL_EDGE |  		\
	 ARCH_PERFMON_EVENTSEL_INV | 		\
	 ARCH_PERFMON_EVENTSEL_CMASK | 		\
	 ARCH_PERFMON_EVENTSEL_ANY | 		\
	 ARCH_PERFMON_EVENTSEL_PIN_CONTROL | 	\
	 HSW_IN_TX | 				\
	 HSW_IN_TX_CHECKPOINTED)
#define AMD64_RAW_EVENT_MASK		\
	(X86_RAW_EVENT_MASK          |  \
	 AMD64_EVENTSEL_EVENT)
#define AMD64_RAW_EVENT_MASK_NB		\
	(AMD64_EVENTSEL_EVENT        |  \
	 ARCH_PERFMON_EVENTSEL_UMASK)
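
/*
 * Illustrative sketch (not compiled; hypothetical helper): the perf
 * core applies these masks to sanitize a user-supplied PERF_TYPE_RAW
 * config before it reaches an event select MSR; bits outside the
 * mask are dropped:
 */
#if 0
static inline u64 example_sanitize_raw_config(u64 raw, bool is_amd)
{
	return raw & (is_amd ? AMD64_RAW_EVENT_MASK : X86_RAW_EVENT_MASK);
}
#endif
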
#define AMD64_NUM_COUNTERS				4
#define AMD64_NUM_COUNTERS_CORE				6
#define AMD64_NUM_COUNTERS_NB				4

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX		0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED		6
#define ARCH_PERFMON_EVENTS_COUNT			7

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_ebx {
	struct {
		unsigned int no_unhalted_core_cycles:1;
		unsigned int no_instructions_retired:1;
		unsigned int no_unhalted_reference_cycles:1;
		unsigned int no_llc_reference:1;
		unsigned int no_llc_misses:1;
		unsigned int no_branch_instruction_retired:1;
		unsigned int no_branch_misses_retired:1;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
		unsigned int reserved:19;
	} split;
	unsigned int full;
};
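
/*
 * Illustrative sketch (not compiled; hypothetical helper, assuming
 * cpuid() from <asm/processor.h> and pr_info()): the unions above map
 * directly onto the registers returned by CPUID leaf 0xa:
 */
#if 0
static inline void example_decode_cpuid10(void)
{
	union cpuid10_eax eax;
	union cpuid10_ebx ebx;
	union cpuid10_edx edx;
	unsigned int unused;

	cpuid(0xa, &eax.full, &ebx.full, &unused, &edx.full);

	pr_info("PMU v%d: %d GP counters x %d bits, %d fixed counters\n",
		eax.split.version_id, eax.split.num_counters,
		eax.split.bit_width, edx.split.num_counters_fixed);

	/* Note the inverted sense of the EBX bits: set means absent. */
	if (!ebx.split.no_branch_misses_retired)
		pr_info("branch-misses architectural event present\n");
}
#endif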

struct x86_pmu_capability {
	int		version;
	int		num_counters_gp;
	int		num_counters_fixed;
	int		bit_width_gp;
	int		bit_width_fixed;
	unsigned int	events_mask;
	int		events_mask_len;
};

/*
 * Fixed-purpose performance events:
 */

/*
 * All 3 fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL	0x38d

/*
 * The counts are available in three separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0	0x309
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS	(INTEL_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1	0x30a
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES	(INTEL_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2	0x30b
#define INTEL_PMC_IDX_FIXED_REF_CYCLES	(INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_MSK_FIXED_REF_CYCLES	(1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)
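
/*
 * Illustrative sketch (not compiled; hypothetical helper): the control
 * MSR carries a 4-bit field per fixed counter (bit 0: count in ring 0,
 * bit 1: count in ring 3, bit 3: PMI on overflow). Enabling fixed
 * counter 0 (instructions retired) for user+kernel with a PMI:
 */
#if 0
static inline void example_enable_fixed_insns(void)
{
	int idx = INTEL_PMC_IDX_FIXED_INSTRUCTIONS - INTEL_PMC_IDX_FIXED;
	u64 ctrl;

	rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, ctrl);
	ctrl |= (0x1ULL | 0x2ULL | 0x8ULL) << (idx * 4);
	wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, ctrl);
}
#endif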

/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose a value in the middle of the fixed event range, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define INTEL_PMC_IDX_FIXED_BTS				(INTEL_PMC_IDX_FIXED + 16)

#define GLOBAL_STATUS_COND_CHG				BIT_ULL(63)
#define GLOBAL_STATUS_BUFFER_OVF			BIT_ULL(62)
#define GLOBAL_STATUS_UNC_OVF				BIT_ULL(61)
#define GLOBAL_STATUS_ASIF				BIT_ULL(60)
#define GLOBAL_STATUS_COUNTERS_FROZEN			BIT_ULL(59)
#define GLOBAL_STATUS_LBRS_FROZEN			BIT_ULL(58)
#define GLOBAL_STATUS_TRACE_TOPAPMI			BIT_ULL(55)

/*
 * IBS cpuid feature detection
 */

#define IBS_CPUID_FEATURES		0x8000001b

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL			(1U<<0)
#define IBS_CAPS_FETCHSAM		(1U<<1)
#define IBS_CAPS_OPSAM			(1U<<2)
#define IBS_CAPS_RDWROPCNT		(1U<<3)
#define IBS_CAPS_OPCNT			(1U<<4)
#define IBS_CAPS_BRNTRGT		(1U<<5)
#define IBS_CAPS_OPCNTEXT		(1U<<6)
#define IBS_CAPS_RIPINVALIDCHK		(1U<<7)
#define IBS_CAPS_OPBRNFUSE		(1U<<8)
#define IBS_CAPS_FETCHCTLEXTD		(1U<<9)
#define IBS_CAPS_OPDATA4		(1U<<10)

#define IBS_CAPS_DEFAULT		(IBS_CAPS_AVAIL		\
					 | IBS_CAPS_FETCHSAM	\
					 | IBS_CAPS_OPSAM)

/*
 * IBS APIC setup
 */
#define IBSCTL				0x1cc
#define IBSCTL_LVT_OFFSET_VALID		(1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK		0x0F

/* ibs fetch bits/masks */
#define IBS_FETCH_RAND_EN	(1ULL<<57)
#define IBS_FETCH_VAL		(1ULL<<49)
#define IBS_FETCH_ENABLE	(1ULL<<48)
#define IBS_FETCH_CNT		0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT	0x0000FFFFULL

/* ibs op bits/masks */
/* lower 4 bits of the current count are ignored: */
#define IBS_OP_CUR_CNT		(0xFFFF0ULL<<32)
#define IBS_OP_CNT_CTL		(1ULL<<19)
#define IBS_OP_VAL		(1ULL<<18)
#define IBS_OP_ENABLE		(1ULL<<17)
#define IBS_OP_MAX_CNT		0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT	0x007FFFFFULL	/* not a register bit mask */
#define IBS_RIP_INVALID		(1ULL<<38)
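
/*
 * Illustrative sketch (not compiled; hypothetical helper, assuming
 * MSR_AMD64_IBSFETCHCTL from <asm/msr-index.h>): arming IBS fetch
 * sampling. The hardware period is kept in IbsFetchMaxCnt with the
 * low 4 bits implied, hence the shift:
 */
#if 0
static inline void example_arm_ibs_fetch(u64 period)
{
	u64 val = (period >> 4) & IBS_FETCH_MAX_CNT;

	wrmsrl(MSR_AMD64_IBSFETCHCTL, val | IBS_FETCH_ENABLE);
}
#endif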

#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
#else
static inline u32 get_ibs_caps(void) { return 0; }
#endif
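
/*
 * Illustrative sketch (not compiled; hypothetical helper): callers
 * test the returned capability bits, e.g. whether IBS op sampling
 * can count dispatched ops rather than clock cycles:
 */
#if 0
static inline bool example_ibs_op_cnt_supported(void)
{
	return !!(get_ibs_caps() & IBS_CAPS_OPCNT);
}
#endif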

#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

/*
 * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
 * unused and ABI specified to be 0, so nobody should care what we do with
 * them.
 *
 * EXACT - the IP points to the exact instruction that triggered the
 *         event (HW bugs exempt).
 * VM    - original X86_VM_MASK; see set_linear_ip().
 */
#define PERF_EFLAGS_EXACT	(1UL << 3)
#define PERF_EFLAGS_VM		(1UL << 5)

struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs)	perf_misc_flags(regs)

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 from flags to pass exact information, see perf_misc_flags
 * and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip)		{	\
	(regs)->ip = (__ip);					\
	(regs)->bp = caller_frame_pointer();			\
	(regs)->cs = __KERNEL_CS;				\
	(regs)->flags = 0;				\
	asm volatile(						\
		_ASM_MOV "%%"_ASM_SP ", %0\n"			\
		: "=m" ((regs)->sp)				\
		:: "memory"					\
	);							\
}

struct perf_guest_switch_msr {
	unsigned msr;
	u64 host, guest;
};

extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
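
/*
 * Illustrative sketch (not compiled; hypothetical helper): a
 * hypervisor walks this list on guest entry and switches every MSR
 * whose host and guest values differ, which is essentially what KVM
 * does with it:
 */
#if 0
static inline void example_switch_guest_msrs(void)
{
	struct perf_guest_switch_msr *msrs;
	int i, nr;

	msrs = perf_guest_get_msrs(&nr);
	for (i = 0; i < nr; i++) {
		if (msrs[i].host != msrs[i].guest)
			; /* queue msrs[i].msr for an atomic switch */
	}
}
#endif
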
extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern void perf_check_microcode(void);
#else
static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
	*nr = 0;
	return NULL;
}

static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	memset(cap, 0, sizeof(*cap));
}

static inline void perf_events_lapic_init(void)	{ }
static inline void perf_check_microcode(void) { }
#endif

#ifdef CONFIG_CPU_SUP_INTEL
 extern void intel_pt_handle_vmx(int on);
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
 extern void amd_pmu_enable_virt(void);
 extern void amd_pmu_disable_virt(void);
#else
 static inline void amd_pmu_enable_virt(void) { }
 static inline void amd_pmu_disable_virt(void) { }
#endif

#define arch_perf_out_copy_user copy_from_user_nmi

#endif /* _ASM_X86_PERF_EVENT_H */