/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

/*
 * Performance event hw details:
 */

#define INTEL_PMC_MAX_GENERIC				       32
#define INTEL_PMC_MAX_FIXED					3
#define INTEL_PMC_IDX_FIXED				       32

#define X86_PMC_IDX_MAX					       64

#define MSR_ARCH_PERFMON_PERFCTR0			      0xc1
#define MSR_ARCH_PERFMON_PERFCTR1			      0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0			     0x186
#define MSR_ARCH_PERFMON_EVENTSEL1			     0x187

#define ARCH_PERFMON_EVENTSEL_EVENT			0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK			0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR			(1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS			(1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE			(1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL		(1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT			(1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY			(1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE			(1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV			(1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK			0xFF000000ULL

#define HSW_IN_TX					(1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED				(1ULL << 33)

#define AMD64_EVENTSEL_INT_CORE_ENABLE			(1ULL << 36)
#define AMD64_EVENTSEL_GUESTONLY			(1ULL << 40)
#define AMD64_EVENTSEL_HOSTONLY				(1ULL << 41)

#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT		37
#define AMD64_EVENTSEL_INT_CORE_SEL_MASK		\
	(0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)

#define AMD64_EVENTSEL_EVENT	\
	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK	\
	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define X86_RAW_EVENT_MASK		\
	(ARCH_PERFMON_EVENTSEL_EVENT |	\
	 ARCH_PERFMON_EVENTSEL_UMASK |	\
	 ARCH_PERFMON_EVENTSEL_EDGE  |	\
	 ARCH_PERFMON_EVENTSEL_INV   |	\
	 ARCH_PERFMON_EVENTSEL_CMASK)
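
/*
 * Illustrative sketch (not part of this header): a raw event select
 * value is built by OR-ing the fields above.  The hypothetical helper
 * below encodes an event/umask pair (e.g. 0xc0/0x00, INST_RETIRED.ANY
 * on many Intel CPUs), counting in both user and kernel mode:
 *
 *	static u64 encode_raw_event(u8 event, u8 umask)
 *	{
 *		u64 config = event | ((u64)umask << 8);
 *
 *		config |= ARCH_PERFMON_EVENTSEL_USR;	// count ring 3
 *		config |= ARCH_PERFMON_EVENTSEL_OS;	// count ring 0
 *		config |= ARCH_PERFMON_EVENTSEL_ENABLE;	// enable counter
 *		return config;
 *	}
 *
 * X86_RAW_EVENT_MASK is the validation mask applied to user-supplied
 * raw configs: it keeps the event, umask, edge, inv and cmask fields.
 */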
#define X86_ALL_EVENT_FLAGS  			\
	(ARCH_PERFMON_EVENTSEL_EDGE |  		\
	 ARCH_PERFMON_EVENTSEL_INV | 		\
	 ARCH_PERFMON_EVENTSEL_CMASK | 		\
	 ARCH_PERFMON_EVENTSEL_ANY | 		\
	 ARCH_PERFMON_EVENTSEL_PIN_CONTROL | 	\
	 HSW_IN_TX | 				\
	 HSW_IN_TX_CHECKPOINTED)
#define AMD64_RAW_EVENT_MASK		\
	(X86_RAW_EVENT_MASK          |  \
	 AMD64_EVENTSEL_EVENT)
#define AMD64_RAW_EVENT_MASK_NB		\
	(AMD64_EVENTSEL_EVENT        |  \
	 ARCH_PERFMON_EVENTSEL_UMASK)
#define AMD64_NUM_COUNTERS				4
#define AMD64_NUM_COUNTERS_CORE				6
#define AMD64_NUM_COUNTERS_NB				4

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX		0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED		6
#define ARCH_PERFMON_EVENTS_COUNT			7

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_ebx {
	struct {
		unsigned int no_unhalted_core_cycles:1;
		unsigned int no_instructions_retired:1;
		unsigned int no_unhalted_reference_cycles:1;
		unsigned int no_llc_reference:1;
		unsigned int no_llc_misses:1;
		unsigned int no_branch_instruction_retired:1;
		unsigned int no_branch_misses_retired:1;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
		unsigned int reserved:19;
	} split;
	unsigned int full;
};

struct x86_pmu_capability {
	int		version;
	int		num_counters_gp;
	int		num_counters_fixed;
	int		bit_width_gp;
	int		bit_width_fixed;
	unsigned int	events_mask;
	int		events_mask_len;
};
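
/*
 * Illustrative sketch (not part of this header): the unions above map
 * directly onto the registers returned by CPUID leaf 0xa, so filling a
 * struct x86_pmu_capability from hardware could look like this
 * (assumes <asm/processor.h> for cpuid()):
 *
 *	static void fill_pmu_capability(struct x86_pmu_capability *cap)
 *	{
 *		union cpuid10_eax eax;
 *		union cpuid10_edx edx;
 *		unsigned int ebx, ecx;
 *
 *		cpuid(0xa, &eax.full, &ebx, &ecx, &edx.full);
 *
 *		cap->version		= eax.split.version_id;
 *		cap->num_counters_gp	= eax.split.num_counters;
 *		cap->bit_width_gp	= eax.split.bit_width;
 *		cap->events_mask	= ebx;	// set bit == event absent
 *		cap->events_mask_len	= eax.split.mask_length;
 *		cap->num_counters_fixed	= edx.split.num_counters_fixed;
 *		cap->bit_width_fixed	= edx.split.bit_width_fixed;
 *	}
 */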

/*
 * Fixed-purpose performance events:
 */

/*
 * All 3 fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL	0x38d

/*
 * The counts are available in three separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0	0x309
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS	(INTEL_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1	0x30a
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES	(INTEL_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2	0x30b
#define INTEL_PMC_IDX_FIXED_REF_CYCLES	(INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_MSK_FIXED_REF_CYCLES	(1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)
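
/*
 * Illustrative sketch (not part of this header): per the Intel SDM,
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL carries one 4-bit control field per
 * fixed counter (bit 0: count ring 0, bit 1: count rings > 0, bit 3:
 * PMI on overflow).  Enabling fixed counter 0 (Instr_Retired.Any) for
 * user+kernel counting could look like this (assumes <asm/msr.h> for
 * rdmsrl()/wrmsrl()):
 *
 *	u64 ctrl;
 *	int idx = INTEL_PMC_IDX_FIXED_INSTRUCTIONS - INTEL_PMC_IDX_FIXED;
 *
 *	rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, ctrl);
 *	ctrl |= 0x3ULL << (4 * idx);	// OS + USR for fixed counter 0
 *	wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, ctrl);
 */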

/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose a value in the middle of the fixed event range, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define INTEL_PMC_IDX_FIXED_BTS				(INTEL_PMC_IDX_FIXED + 16)

#define GLOBAL_STATUS_COND_CHG				BIT_ULL(63)
#define GLOBAL_STATUS_BUFFER_OVF			BIT_ULL(62)
#define GLOBAL_STATUS_UNC_OVF				BIT_ULL(61)
#define GLOBAL_STATUS_ASIF				BIT_ULL(60)
#define GLOBAL_STATUS_COUNTERS_FROZEN			BIT_ULL(59)
#define GLOBAL_STATUS_LBRS_FROZEN			BIT_ULL(58)
#define GLOBAL_STATUS_TRACE_TOPAPMI			BIT_ULL(55)

/*
 * IBS cpuid feature detection
 */

#define IBS_CPUID_FEATURES		0x8000001b

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL			(1U<<0)
#define IBS_CAPS_FETCHSAM		(1U<<1)
#define IBS_CAPS_OPSAM			(1U<<2)
#define IBS_CAPS_RDWROPCNT		(1U<<3)
#define IBS_CAPS_OPCNT			(1U<<4)
#define IBS_CAPS_BRNTRGT		(1U<<5)
#define IBS_CAPS_OPCNTEXT		(1U<<6)
#define IBS_CAPS_RIPINVALIDCHK		(1U<<7)
#define IBS_CAPS_OPBRNFUSE		(1U<<8)
#define IBS_CAPS_FETCHCTLEXTD		(1U<<9)
#define IBS_CAPS_OPDATA4		(1U<<10)

#define IBS_CAPS_DEFAULT		(IBS_CAPS_AVAIL		\
					 | IBS_CAPS_FETCHSAM	\
					 | IBS_CAPS_OPSAM)
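
/*
 * Illustrative sketch (not part of this header): detection roughly as
 * get_ibs_caps() does it, assuming <asm/processor.h> for boot_cpu_has()
 * and cpuid_eax():
 *
 *	static u32 detect_ibs_caps(void)
 *	{
 *		u32 caps;
 *
 *		if (!boot_cpu_has(X86_FEATURE_IBS))
 *			return 0;
 *
 *		if (cpuid_eax(0x80000000) < IBS_CPUID_FEATURES)
 *			return IBS_CAPS_DEFAULT;	// no feature leaf
 *
 *		caps = cpuid_eax(IBS_CPUID_FEATURES);
 *		if (!(caps & IBS_CAPS_AVAIL))
 *			return IBS_CAPS_DEFAULT;	// flags not valid
 *
 *		return caps;
 *	}
 */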

/*
 * IBS APIC setup
 */
#define IBSCTL				0x1cc
#define IBSCTL_LVT_OFFSET_VALID		(1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK		0x0F

/* ibs fetch bits/masks */
#define IBS_FETCH_RAND_EN	(1ULL<<57)
#define IBS_FETCH_VAL		(1ULL<<49)
#define IBS_FETCH_ENABLE	(1ULL<<48)
#define IBS_FETCH_CNT		0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT	0x0000FFFFULL

/* ibs op bits/masks */
/* lower 4 bits of the current count are ignored: */
#define IBS_OP_CUR_CNT		(0xFFFF0ULL<<32)
#define IBS_OP_CNT_CTL		(1ULL<<19)
#define IBS_OP_VAL		(1ULL<<18)
#define IBS_OP_ENABLE		(1ULL<<17)
#define IBS_OP_MAX_CNT		0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT	0x007FFFFFULL	/* not a register bit mask */
#define IBS_RIP_INVALID		(1ULL<<38)
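
/*
 * Illustrative sketch (not part of this header): the max-count fields
 * are stored in units of 16, so a sampling period is written as
 * period >> 4 and its low 4 bits are lost.  Starting op sampling could
 * look like this (assumes <asm/msr.h>; MSR_AMD64_IBSOPCTL comes from
 * msr-index.h):
 *
 *	static void ibs_op_start(u64 period)
 *	{
 *		u64 val = (period >> 4) & IBS_OP_MAX_CNT;
 *
 *		wrmsrl(MSR_AMD64_IBSOPCTL, val | IBS_OP_ENABLE);
 *	}
 */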

#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
#else
static inline u32 get_ibs_caps(void) { return 0; }
#endif

#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

/*
 * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
 * unused and ABI specified to be 0, so nobody should care what we do with
 * them.
 *
 * EXACT - the IP points to the exact instruction that triggered the
 *         event (HW bugs exempt).
 * VM    - original X86_VM_MASK; see set_linear_ip().
 */
#define PERF_EFLAGS_EXACT	(1UL << 3)
#define PERF_EFLAGS_VM		(1UL << 5)

struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs)	perf_misc_flags(regs)

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 from flags to pass exact information, see perf_misc_flags
 * and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip)		{	\
	(regs)->ip = (__ip);					\
	(regs)->bp = caller_frame_pointer();			\
	(regs)->cs = __KERNEL_CS;				\
	regs->flags = 0;					\
	asm volatile(						\
		_ASM_MOV "%%"_ASM_SP ", %0\n"			\
		: "=m" ((regs)->sp)				\
		:: "memory"					\
	);							\
}

struct perf_guest_switch_msr {
	unsigned msr;
	u64 host, guest;
};

extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
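
/*
 * Illustrative sketch (not part of this header): a hypervisor-side
 * caller is expected to switch the returned MSRs between their host
 * and guest values around VM entry/exit, roughly:
 *
 *	int i, nr;
 *	struct perf_guest_switch_msr *msrs = perf_guest_get_msrs(&nr);
 *
 *	for (i = 0; i < nr; i++)		// before VM entry
 *		wrmsrl(msrs[i].msr, msrs[i].guest);
 *	// ... run the guest ...
 *	for (i = 0; i < nr; i++)		// after VM exit
 *		wrmsrl(msrs[i].msr, msrs[i].host);
 */
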
extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern void perf_check_microcode(void);
extern int x86_perf_rdpmc_index(struct perf_event *event);
#else
static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
	*nr = 0;
	return NULL;
}

static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	memset(cap, 0, sizeof(*cap));
}

static inline void perf_events_lapic_init(void)	{ }
static inline void perf_check_microcode(void) { }
#endif

#ifdef CONFIG_CPU_SUP_INTEL
 extern void intel_pt_handle_vmx(int on);
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
 extern void amd_pmu_enable_virt(void);
 extern void amd_pmu_disable_virt(void);
#else
 static inline void amd_pmu_enable_virt(void) { }
 static inline void amd_pmu_disable_virt(void) { }
#endif

#define arch_perf_out_copy_user copy_from_user_nmi

#endif /* _ASM_X86_PERF_EVENT_H */