diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 25a1b9953c1d9e4c11b1704ef0337e1007a17294..b4b3ecc9bbbaadcb5f7e36c370d004f7b7a23591 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1063,6 +1063,7 @@ static inline void x86_assign_hw_event(struct perf_event *event,
 
 	switch (hwc->idx) {
 	case INTEL_PMC_IDX_FIXED_BTS:
+	case INTEL_PMC_IDX_FIXED_VLBR:
 		hwc->config_base = 0;
 		hwc->event_base	= 0;
 		break;
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index e5ef223a60931688da90425a928f88e2032a9623..ec4373143d1644e2962d9b6c31bf2d108aebdd37 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2494,6 +2494,20 @@ intel_bts_constraints(struct perf_event *event)
 	return NULL;
 }
 
+/*
+ * Note: matches a fake event, like Fixed2.
+ */
+static struct event_constraint *
+intel_vlbr_constraints(struct perf_event *event)
+{
+	struct event_constraint *c = &vlbr_constraint;
+
+	if (unlikely(constraint_match(c, event->hw.config)))
+		return c;
+
+	return NULL;
+}
+
 static int intel_alt_er(int idx, u64 config)
 {
 	int alt_idx = idx;
@@ -2684,6 +2698,10 @@ __intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
 {
 	struct event_constraint *c;
 
+	c = intel_vlbr_constraints(event);
+	if (c)
+		return c;
+
 	c = intel_bts_constraints(event);
 	if (c)
 		return c;
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 15aa2fbdd60ec98da542e54f3cd50fb04e2be98c..eea75f1fc245daf52a3e3e6f205555addcb264ac 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -1330,3 +1330,7 @@ int x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
 	return 0;
 }
 EXPORT_SYMBOL_GPL(x86_perf_get_lbr);
+
+struct event_constraint vlbr_constraint =
+	FIXED_EVENT_CONSTRAINT(INTEL_FIXED_VLBR_EVENT,
+			       (INTEL_PMC_IDX_FIXED_VLBR - INTEL_PMC_IDX_FIXED));
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 21745fd662d68420c5f9775898d8948a2e5edbd5..639a8a6ba05c74a59bf10001978faced7e18bb60 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -966,6 +966,7 @@ void release_ds_buffers(void);
 
 void reserve_ds_buffers(void);
 
 extern struct event_constraint bts_constraint;
+extern struct event_constraint vlbr_constraint;
 
 void intel_pmu_enable_bts(u64 config);
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index b0174e83ddda74370e688171585baf1ee078374c..c4729277d31d79715bcb607f28e9146e10ebd6d5 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -181,9 +181,29 @@ struct x86_pmu_capability {
 #define GLOBAL_STATUS_UNC_OVF			BIT_ULL(61)
 #define GLOBAL_STATUS_ASIF			BIT_ULL(60)
 #define GLOBAL_STATUS_COUNTERS_FROZEN		BIT_ULL(59)
-#define GLOBAL_STATUS_LBRS_FROZEN		BIT_ULL(58)
+#define GLOBAL_STATUS_LBRS_FROZEN_BIT		58
+#define GLOBAL_STATUS_LBRS_FROZEN		BIT_ULL(GLOBAL_STATUS_LBRS_FROZEN_BIT)
 #define GLOBAL_STATUS_TRACE_TOPAPMI		BIT_ULL(55)
 
+/*
+ * We model guest LBR event tracing as another fixed-mode PMC, like BTS.
+ *
+ * We choose bit 58 because it indicates the LBR-stack-frozen state on
+ * architectural perfmon v4, and because handle_pmi_common() unconditionally
+ * masks that bit, so it will never be set during overflow handling.
+ *
+ * With this fake counter assigned, the guest LBR event user (such as KVM)
+ * can program the LBR registers on its own, and we don't actually do
+ * anything with them in the host context.
+ */
+#define INTEL_PMC_IDX_FIXED_VLBR	(GLOBAL_STATUS_LBRS_FROZEN_BIT)
+
+/*
+ * Pseudo-encoding the guest LBR event as event=0x00,umask=0x1b,
+ * since it would claim bit 58, which is effectively Fixed26.
+ */
+#define INTEL_FIXED_VLBR_EVENT	0x1b00
+
 /*
  * Adaptive PEBS v4
  */
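
For reference, here is a minimal standalone sketch (not part of the patch) of the arithmetic the new constraint relies on. It assumes a plain 0xffff mask in place of the kernel's FIXED_EVENT_FLAGS (the event and umask fields of the config), and vlbr_match() is a hypothetical stand-in for constraint_match(); the constants are copied from the hunks above.

/*
 * Standalone user-space sketch, not kernel code: shows why the
 * pseudo-encoding event=0x00/umask=0x1b matches the VLBR constraint
 * and why bit 58 is "effectively Fixed26".
 */
#include <stdint.h>
#include <stdio.h>

#define INTEL_PMC_IDX_FIXED		32	/* first fixed-counter index bit */
#define GLOBAL_STATUS_LBRS_FROZEN_BIT	58
#define INTEL_PMC_IDX_FIXED_VLBR	(GLOBAL_STATUS_LBRS_FROZEN_BIT)
#define INTEL_FIXED_VLBR_EVENT		0x1b00	/* event=0x00, umask=0x1b */

/* Stand-in for constraint_match(): the constraint code must equal the
 * event config masked down to its event+umask fields (assumed 0xffff). */
static int vlbr_match(uint64_t config)
{
	return (config & 0xffffULL) == INTEL_FIXED_VLBR_EVENT;
}

int main(void)
{
	/* A guest LBR event carries the pseudo-encoding in its config. */
	uint64_t config = INTEL_FIXED_VLBR_EVENT;

	/* FIXED_EVENT_CONSTRAINT(code, n) marks idxmsk bit 32 + n, so
	 * n = 58 - 32 = 26 lands exactly on GLOBAL_STATUS_LBRS_FROZEN_BIT. */
	int n = INTEL_PMC_IDX_FIXED_VLBR - INTEL_PMC_IDX_FIXED;

	printf("config matches vlbr_constraint: %d\n", vlbr_match(config));
	printf("fixed counter number: %d (idxmsk bit %d)\n",
	       n, INTEL_PMC_IDX_FIXED + n);
	return 0;
}

Compiled with cc, this prints a match and counter number 26 (idxmsk bit 58), which is the "Fixed26" referred to in the pseudo-encoding comment.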