/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

/*
 * Performance event hw details:
 */

#define INTEL_PMC_MAX_GENERIC				       32
#define INTEL_PMC_MAX_FIXED					3
#define INTEL_PMC_IDX_FIXED				       32

#define X86_PMC_IDX_MAX					       64

#define MSR_ARCH_PERFMON_PERFCTR0			      0xc1
#define MSR_ARCH_PERFMON_PERFCTR1			      0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0			     0x186
#define MSR_ARCH_PERFMON_EVENTSEL1			     0x187

#define ARCH_PERFMON_EVENTSEL_EVENT			0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK			0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR			(1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS			(1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE			(1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL		(1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT			(1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY			(1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE			(1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV			(1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK			0xFF000000ULL
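
/*
 * Example (a sketch, not part of this header): the architectural
 * "LLC Misses" event has event select 0x2e and unit mask 0x41, so an
 * EVENTSEL value counting it in both user and kernel mode would be:
 *
 *	u64 config = 0x2e			(event select)
 *		   | (0x41ULL << 8)		(unit mask)
 *		   | ARCH_PERFMON_EVENTSEL_USR
 *		   | ARCH_PERFMON_EVENTSEL_OS
 *		   | ARCH_PERFMON_EVENTSEL_ENABLE;
 */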

#define HSW_IN_TX					(1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED				(1ULL << 33)

#define AMD64_EVENTSEL_INT_CORE_ENABLE			(1ULL << 36)
#define AMD64_EVENTSEL_GUESTONLY			(1ULL << 40)
#define AMD64_EVENTSEL_HOSTONLY				(1ULL << 41)

#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT		37
#define AMD64_EVENTSEL_INT_CORE_SEL_MASK		\
	(0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)

#define AMD64_EVENTSEL_EVENT	\
	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK	\
	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define AMD64_L3_SLICE_SHIFT				48
#define AMD64_L3_SLICE_MASK				\
	((0xFULL) << AMD64_L3_SLICE_SHIFT)

#define AMD64_L3_THREAD_SHIFT				56
#define AMD64_L3_THREAD_MASK				\
	((0xFFULL) << AMD64_L3_THREAD_SHIFT)

#define X86_RAW_EVENT_MASK		\
	(ARCH_PERFMON_EVENTSEL_EVENT |	\
	 ARCH_PERFMON_EVENTSEL_UMASK |	\
	 ARCH_PERFMON_EVENTSEL_EDGE  |	\
	 ARCH_PERFMON_EVENTSEL_INV   |	\
	 ARCH_PERFMON_EVENTSEL_CMASK)
#define X86_ALL_EVENT_FLAGS  			\
	(ARCH_PERFMON_EVENTSEL_EDGE |  		\
	 ARCH_PERFMON_EVENTSEL_INV | 		\
	 ARCH_PERFMON_EVENTSEL_CMASK | 		\
	 ARCH_PERFMON_EVENTSEL_ANY | 		\
	 ARCH_PERFMON_EVENTSEL_PIN_CONTROL | 	\
	 HSW_IN_TX | 				\
	 HSW_IN_TX_CHECKPOINTED)
#define AMD64_RAW_EVENT_MASK		\
	(X86_RAW_EVENT_MASK          |  \
	 AMD64_EVENTSEL_EVENT)
#define AMD64_RAW_EVENT_MASK_NB		\
	(AMD64_EVENTSEL_EVENT        |  \
	 ARCH_PERFMON_EVENTSEL_UMASK)
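
/*
 * Note (editorial, inferred from the narrower mask above): AMD
 * northbridge (NB) counters only honour the event select and unit
 * mask fields; the edge/invert/cmask filters have no effect there,
 * hence AMD64_RAW_EVENT_MASK_NB omits them.
 */
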
#define AMD64_NUM_COUNTERS				4
#define AMD64_NUM_COUNTERS_CORE				6
#define AMD64_NUM_COUNTERS_NB				4

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX		0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED		6
#define ARCH_PERFMON_EVENTS_COUNT			7

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_ebx {
	struct {
		unsigned int no_unhalted_core_cycles:1;
		unsigned int no_instructions_retired:1;
		unsigned int no_unhalted_reference_cycles:1;
		unsigned int no_llc_reference:1;
		unsigned int no_llc_misses:1;
		unsigned int no_branch_instruction_retired:1;
		unsigned int no_branch_misses_retired:1;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
		unsigned int reserved:19;
	} split;
	unsigned int full;
};
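
/*
 * Example (a sketch): the unions above overlay the raw CPUID.0xA
 * register values, so they are typically filled with the plain
 * cpuid() helper from <asm/processor.h>:
 *
 *	union cpuid10_eax eax;
 *	union cpuid10_ebx ebx;
 *	union cpuid10_edx edx;
 *	unsigned int unused;
 *
 *	cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
 *
 * after which eax.split.num_counters gives the number of GP counters
 * and the ebx.split.no_* bits flag absent architectural events.
 */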

struct x86_pmu_capability {
	int		version;
	int		num_counters_gp;
	int		num_counters_fixed;
	int		bit_width_gp;
	int		bit_width_fixed;
	unsigned int	events_mask;
	int		events_mask_len;
};

/*
 * Fixed-purpose performance events:
 */

/*
 * All 3 fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL	0x38d
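
/*
 * Per the Intel SDM, each fixed-mode PMC gets a 4-bit control field in
 * this MSR: bit 0 enables ring-0 counting, bit 1 enables counting at
 * higher privilege levels, and bit 3 raises a PMI on overflow. E.g.
 * setting field N to 0xb makes fixed counter N count at all privilege
 * levels and deliver a PMI on overflow.
 */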

/*
 * The counts are available in three separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0	0x309
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS	(INTEL_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1	0x30a
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES	(INTEL_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2	0x30b
#define INTEL_PMC_IDX_FIXED_REF_CYCLES	(INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_MSK_FIXED_REF_CYCLES	(1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)

/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose a value in the middle of the fixed event range, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define INTEL_PMC_IDX_FIXED_BTS				(INTEL_PMC_IDX_FIXED + 16)

#define GLOBAL_STATUS_COND_CHG				BIT_ULL(63)
#define GLOBAL_STATUS_BUFFER_OVF			BIT_ULL(62)
#define GLOBAL_STATUS_UNC_OVF				BIT_ULL(61)
#define GLOBAL_STATUS_ASIF				BIT_ULL(60)
#define GLOBAL_STATUS_COUNTERS_FROZEN			BIT_ULL(59)
#define GLOBAL_STATUS_LBRS_FROZEN			BIT_ULL(58)
#define GLOBAL_STATUS_TRACE_TOPAPMI			BIT_ULL(55)

/*
 * IBS cpuid feature detection
 */

#define IBS_CPUID_FEATURES		0x8000001b

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL			(1U<<0)
#define IBS_CAPS_FETCHSAM		(1U<<1)
#define IBS_CAPS_OPSAM			(1U<<2)
#define IBS_CAPS_RDWROPCNT		(1U<<3)
#define IBS_CAPS_OPCNT			(1U<<4)
#define IBS_CAPS_BRNTRGT		(1U<<5)
#define IBS_CAPS_OPCNTEXT		(1U<<6)
#define IBS_CAPS_RIPINVALIDCHK		(1U<<7)
#define IBS_CAPS_OPBRNFUSE		(1U<<8)
#define IBS_CAPS_FETCHCTLEXTD		(1U<<9)
#define IBS_CAPS_OPDATA4		(1U<<10)

#define IBS_CAPS_DEFAULT		(IBS_CAPS_AVAIL		\
					 | IBS_CAPS_FETCHSAM	\
					 | IBS_CAPS_OPSAM)

/*
 * IBS APIC setup
 */
#define IBSCTL				0x1cc
#define IBSCTL_LVT_OFFSET_VALID		(1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK		0x0F

/* IBS fetch bits/masks */
#define IBS_FETCH_RAND_EN	(1ULL<<57)
#define IBS_FETCH_VAL		(1ULL<<49)
#define IBS_FETCH_ENABLE	(1ULL<<48)
#define IBS_FETCH_CNT		0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT	0x0000FFFFULL
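
/*
 * Note (per the AMD APM, and how the perf IBS driver programs these):
 * the 16-bit *_MAX_CNT fields hold bits 19:4 of the desired sample
 * period, so the effective maximum count is the field value << 4.
 */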

/*
 * IBS op bits/masks
 * The lower 7 bits of the current count are random bits
 * preloaded by hardware and ignored in software
 */
#define IBS_OP_CUR_CNT		(0xFFF80ULL<<32)
#define IBS_OP_CUR_CNT_RAND	(0x0007FULL<<32)
#define IBS_OP_CNT_CTL		(1ULL<<19)
#define IBS_OP_VAL		(1ULL<<18)
#define IBS_OP_ENABLE		(1ULL<<17)
#define IBS_OP_MAX_CNT		0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT	0x007FFFFFULL	/* not a register bit mask */
#define IBS_RIP_INVALID		(1ULL<<38)

#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
#else
static inline u32 get_ibs_caps(void) { return 0; }
#endif
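
/*
 * Example (a sketch): a driver probing for IBS execution sampling:
 *
 *	u32 caps = get_ibs_caps();
 *
 *	if (!(caps & IBS_CAPS_OPSAM))
 *		return -ENODEV;
 */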

#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

/*
 * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
 * unused and ABI specified to be 0, so nobody should care what we do with
 * them.
 *
 * EXACT - the IP points to the exact instruction that triggered the
 *         event (HW bugs exempt).
 * VM    - original X86_VM_MASK; see set_linear_ip().
 */
#define PERF_EFLAGS_EXACT	(1UL << 3)
#define PERF_EFLAGS_VM		(1UL << 5)

struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs)	perf_misc_flags(regs)

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 of the flags to pass exact information; see perf_misc_flags
 * and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip)		{	\
	(regs)->ip = (__ip);					\
	(regs)->bp = caller_frame_pointer();			\
	(regs)->cs = __KERNEL_CS;				\
	(regs)->flags = 0;					\
	asm volatile(						\
		_ASM_MOV "%%"_ASM_SP ", %0\n"			\
		: "=m" ((regs)->sp)				\
		:: "memory"					\
	);							\
}

struct perf_guest_switch_msr {
	unsigned msr;
	u64 host, guest;
};

extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
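
/*
 * Example (a sketch of the consumer side, modelled on how KVM switches
 * PMU state around guest entry/exit):
 *
 *	int i, nr;
 *	struct perf_guest_switch_msr *msrs = perf_guest_get_msrs(&nr);
 *
 *	for (i = 0; i < nr; i++)
 *		wrmsrl(msrs[i].msr, msrs[i].guest);	(before VM-entry)
 *
 * with msrs[i].host written back after VM-exit.
 */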
extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern void perf_check_microcode(void);
#else
static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
	*nr = 0;
	return NULL;
}

static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	memset(cap, 0, sizeof(*cap));
}

static inline void perf_events_lapic_init(void)	{ }
static inline void perf_check_microcode(void) { }
#endif

#ifdef CONFIG_CPU_SUP_INTEL
 extern void intel_pt_handle_vmx(int on);
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
 extern void amd_pmu_enable_virt(void);
 extern void amd_pmu_disable_virt(void);
#else
 static inline void amd_pmu_enable_virt(void) { }
 static inline void amd_pmu_disable_virt(void) { }
#endif
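
/*
 * Note (an assumption, from how the SVM host code uses this pair):
 * amd_pmu_enable_virt()/amd_pmu_disable_virt() are called when hardware
 * virtualization is turned on/off, so that counters honour the
 * AMD64_EVENTSEL_GUESTONLY/HOSTONLY bits only while SVM is active.
 */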

#define arch_perf_out_copy_user copy_from_user_nmi

#endif /* _ASM_X86_PERF_EVENT_H */