// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for Intel CPUs
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "nested.h"
#include "pmu.h"

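/*
 * MSR_IA32_PMC0 (0x4c1) is the full-width alias of MSR_IA32_PERFCTR0
 * (0xc1), so this bit distinguishes accesses made through the full-width
 * MSR range from those made through the legacy range.
 */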
#define MSR_PMC_FULL_WIDTH_BIT      (MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)

static struct kvm_event_hw_type_mapping intel_arch_events[] = {
	[0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES  },
	[3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
	[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
	[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	/* The above index must match CPUID 0x0A.EBX bit vector */
	[7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES },
};

/* mapping between fixed pmc index and intel_arch_events array */
static int fixed_pmc_events[] = {1, 0, 7};

static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
	int i;

	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
		u8 new_ctrl = fixed_ctrl_field(data, i);
		u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
		struct kvm_pmc *pmc;

		pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);

		if (old_ctrl == new_ctrl)
			continue;

		__set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use);
		reprogram_fixed_counter(pmc, new_ctrl, i);
	}

	pmu->fixed_ctr_ctrl = data;
}

/* Called when the global control register has been updated. */
static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
	int bit;
	u64 diff = pmu->global_ctrl ^ data;

	pmu->global_ctrl = data;

	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		reprogram_counter(pmu, bit);
}

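/*
 * Map the guest's event_select/unit_mask pair onto a generic perf hw event
 * id. Returns PERF_COUNT_HW_MAX when there is no architectural match and
 * PERF_COUNT_HW_MAX + 1 when the guest's CPUID reports the event as not
 * available.
 */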
static unsigned int intel_pmc_perf_hw_id(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
	u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
	u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++) {
		if (intel_arch_events[i].eventsel != event_select ||
		    intel_arch_events[i].unit_mask != unit_mask)
			continue;

		/* Disable events that CPUID reports as not present. */
		if ((i < 7) && !(pmu->available_event_types & (1 << i)))
			return PERF_COUNT_HW_MAX + 1;

		break;
	}

	if (i == ARRAY_SIZE(intel_arch_events))
		return PERF_COUNT_HW_MAX;

	return intel_arch_events[i].event_type;
}

/* Check if a PMC is enabled by comparing it with global_ctrl bits. */
static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}

static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	if (pmc_idx < INTEL_PMC_IDX_FIXED)
		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
				  MSR_P6_EVNTSEL0);
	else {
		u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;

		return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
	}
}

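/*
 * Validate the counter index from a guest RDPMC: bit 30 of ECX selects
 * the fixed counters, the low bits the index within that class.
 */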
static bool intel_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);

	idx &= ~(3u << 30);

	return fixed ? idx < pmu->nr_arch_fixed_counters
		     : idx < pmu->nr_arch_gp_counters;
}

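/*
 * Translate a validated RDPMC index into the corresponding kvm_pmc and
 * narrow *mask to the bit width of the selected counter class.
 */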
static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
					    unsigned int idx, u64 *mask)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);
	struct kvm_pmc *counters;
	unsigned int num_counters;

	idx &= ~(3u << 30);
	if (fixed) {
		counters = pmu->fixed_counters;
		num_counters = pmu->nr_arch_fixed_counters;
	} else {
		counters = pmu->gp_counters;
		num_counters = pmu->nr_arch_gp_counters;
	}
	if (idx >= num_counters)
		return NULL;
	*mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];
	return &counters[array_index_nospec(idx, num_counters)];
}

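/* The guest's IA32_PERF_CAPABILITIES, or 0 if PDCM isn't exposed to it. */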
static inline u64 vcpu_get_perf_capabilities(struct kvm_vcpu *vcpu)
{
	if (!guest_cpuid_has(vcpu, X86_FEATURE_PDCM))
		return 0;

	return vcpu->arch.perf_capabilities;
}

static inline bool fw_writes_is_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu_get_perf_capabilities(vcpu) & PMU_CAP_FW_WRITES) != 0;
}

static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
{
	if (!fw_writes_is_enabled(pmu_to_vcpu(pmu)))
		return NULL;

	return get_gp_pmc(pmu, msr, MSR_IA32_PMC0);
}

bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu)
{
	/*
	 * As a first step, a guest can only enable the LBR feature if its
	 * CPU model is the same as the host's, because the LBR registers
	 * are passed through to the guest and they are model specific.
	 */
	return boot_cpu_data.x86_model == guest_cpuid_model(vcpu);
}

bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
{
	struct x86_pmu_lbr *lbr = vcpu_to_lbr_records(vcpu);

	return lbr->nr && (vcpu_get_perf_capabilities(vcpu) & PMU_CAP_LBR_FMT);
}

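/* Is @index one of the LBR SELECT/TOS/FROM/TO/INFO MSRs exposed to this vCPU? */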
static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
{
	struct x86_pmu_lbr *records = vcpu_to_lbr_records(vcpu);
	bool ret = false;

	if (!intel_pmu_lbr_is_enabled(vcpu))
		return ret;

	ret = (index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS) ||
		(index >= records->from && index < records->from + records->nr) ||
		(index >= records->to && index < records->to + records->nr);

	if (!ret && records->info)
		ret = (index >= records->info && index < records->info + records->nr);

	return ret;
}

static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int ret;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_CORE_PERF_GLOBAL_CTRL:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		ret = pmu->version > 1;
		break;
	default:
		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
			get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
			get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr) ||
			intel_pmu_is_valid_lbr_msr(vcpu, msr);
		break;
	}

	return ret;
}

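/* Resolve a counter or event select MSR to the kvm_pmc it refers to, if any. */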
static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	pmc = get_fixed_pmc(pmu, msr);
	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);
	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);

	return pmc;
}

static inline void intel_pmu_release_guest_lbr_event(struct kvm_vcpu *vcpu)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	if (lbr_desc->event) {
		perf_event_release_kernel(lbr_desc->event);
		lbr_desc->event = NULL;
		vcpu_to_pmu(vcpu)->event_count--;
	}
}

int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct perf_event *event;

	/*
	 * The perf_event_attr is constructed with the minimum set of fields needed:
	 * - set 'pinned = true' to make it task pinned so that if another
	 *   cpu pinned event reclaims LBR, the event->oncpu will be set to -1;
	 * - set '.exclude_host = true' to record only guest branch behavior;
	 *
	 * - set '.config = INTEL_FIXED_VLBR_EVENT' to indicate that host perf
	 *   should schedule the event without a real HW counter but a fake one;
	 *   check is_guest_lbr_event() and __intel_get_event_constraints();
	 *
	 * - set 'sample_type = PERF_SAMPLE_BRANCH_STACK' and
	 *   'branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK |
	 *   PERF_SAMPLE_BRANCH_USER' to configure it as an LBR callstack
	 *   event, which helps KVM save/restore guest LBR records
	 *   during host context switches and reduces overhead quite a lot;
	 *   check branch_user_callstack() and intel_pmu_lbr_sched_task();
	 */
	struct perf_event_attr attr = {
		.type = PERF_TYPE_RAW,
		.size = sizeof(attr),
		.config = INTEL_FIXED_VLBR_EVENT,
		.sample_type = PERF_SAMPLE_BRANCH_STACK,
		.pinned = true,
		.exclude_host = true,
		.branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK |
					PERF_SAMPLE_BRANCH_USER,
	};

	if (unlikely(lbr_desc->event)) {
		__set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
		return 0;
	}

	event = perf_event_create_kernel_counter(&attr, -1,
						current, NULL, NULL);
	if (IS_ERR(event)) {
		pr_debug_ratelimited("%s: failed %ld\n",
					__func__, PTR_ERR(event));
		return PTR_ERR(event);
	}
	lbr_desc->event = event;
	pmu->event_count++;
	__set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
	return 0;
}

/*
 * It's safe to access LBR MSRs from the guest when they have not
 * been passed through, since the host will restore or reset the
 * LBR records when the guest LBR event is scheduled in.
 */
static bool intel_pmu_handle_lbr_msrs_access(struct kvm_vcpu *vcpu,
				     struct msr_data *msr_info, bool read)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
	u32 index = msr_info->index;

	if (!intel_pmu_is_valid_lbr_msr(vcpu, index))
		return false;

	if (!lbr_desc->event && intel_pmu_create_guest_lbr_event(vcpu) < 0)
		goto dummy;

	/*
	 * Disable irqs to ensure the LBR feature doesn't get reclaimed by the
	 * host while the value is read from the MSR; this prevents the host's
	 * LBR value from leaking to the guest. If LBR has been reclaimed,
	 * return 0 on guest reads.
	 */
	local_irq_disable();
	if (lbr_desc->event->state == PERF_EVENT_STATE_ACTIVE) {
		if (read)
			rdmsrl(index, msr_info->data);
		else
			wrmsrl(index, msr_info->data);
		__set_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use);
		local_irq_enable();
		return true;
	}
	clear_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use);
	local_irq_enable();

dummy:
	if (read)
		msr_info->data = 0;
	return true;
}

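/* Read a PMU or LBR MSR on behalf of the guest; returns 0 if handled, 1 if not. */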
static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		msr_info->data = pmu->fixed_ctr_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		msr_info->data = pmu->global_status;
		return 0;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		msr_info->data = pmu->global_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		msr_info->data = 0;
		return 0;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
			u64 val = pmc_read_counter(pmc);
			msr_info->data =
				val & pmu->counter_bitmask[KVM_PMC_GP];
			return 0;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			u64 val = pmc_read_counter(pmc);
			msr_info->data =
				val & pmu->counter_bitmask[KVM_PMC_FIXED];
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			msr_info->data = pmc->eventsel;
			return 0;
		} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, true))
			return 0;
	}

	return 1;
}

static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;
	u64 reserved_bits;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		if (pmu->fixed_ctr_ctrl == data)
			return 0;
		if (!(data & 0xfffffffffffff444ull)) {
			reprogram_fixed_counters(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		if (msr_info->host_initiated) {
			pmu->global_status = data;
			return 0;
		}
		break; /* RO MSR */
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (pmu->global_ctrl == data)
			return 0;
		if (kvm_valid_perf_global_ctrl(pmu, data)) {
			global_ctrl_changed(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		if (!(data & pmu->global_ovf_ctrl_mask)) {
			if (!msr_info->host_initiated)
				pmu->global_status &= ~data;
			return 0;
		}
		break;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
			if ((msr & MSR_PMC_FULL_WIDTH_BIT) &&
			    (data & ~pmu->counter_bitmask[KVM_PMC_GP]))
				return 1;
			if (!msr_info->host_initiated &&
			    !(msr & MSR_PMC_FULL_WIDTH_BIT))
				data = (s64)(s32)data;
			pmc->counter += data - pmc_read_counter(pmc);
			pmc_update_sample_period(pmc);
			return 0;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			pmc->counter += data - pmc_read_counter(pmc);
			pmc_update_sample_period(pmc);
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			if (data == pmc->eventsel)
				return 0;
			reserved_bits = pmu->reserved_bits;
			if ((pmc->idx == 2) &&
			    (pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED))
				reserved_bits ^= HSW_IN_TX_CHECKPOINTED;
			if (!(data & reserved_bits)) {
				reprogram_gp_counter(pmc, data);
				return 0;
			}
		} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false))
			return 0;
	}

	return 1;
}

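/*
 * Seed each fixed counter's eventsel with the architectural encoding of
 * the event it counts, taken from the fixed_pmc_events[] mapping into
 * intel_arch_events[], so lookups on pmc->eventsel also work for fixed
 * counters.
 */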
static void setup_fixed_pmc_eventsel(struct kvm_pmu *pmu)
{
	size_t size = ARRAY_SIZE(fixed_pmc_events);
	struct kvm_pmc *pmc;
	u32 event;
	int i;

	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
		pmc = &pmu->fixed_counters[i];
		event = fixed_pmc_events[array_index_nospec(i, size)];
		pmc->eventsel = (intel_arch_events[event].unit_mask << 8) |
			intel_arch_events[event].eventsel;
	}
}

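/*
 * Recompute the vCPU's PMU configuration from guest CPUID leaf 0xA,
 * clamped to what the host PMU actually supports.
 */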
static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
	struct x86_pmu_capability x86_pmu;
	struct kvm_cpuid_entry2 *entry;
	union cpuid10_eax eax;
	union cpuid10_edx edx;

	pmu->nr_arch_gp_counters = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->counter_bitmask[KVM_PMC_GP] = 0;
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->version = 0;
	pmu->reserved_bits = 0xffffffff00200000ull;
	pmu->raw_event_mask = X86_RAW_EVENT_MASK;

	entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
	if (!entry || !vcpu->kvm->arch.enable_pmu)
		return;
	eax.full = entry->eax;
	edx.full = entry->edx;

	pmu->version = eax.split.version_id;
	if (!pmu->version)
		return;

	vcpu->arch.ia32_misc_enable_msr |= MSR_IA32_MISC_ENABLE_EMON;
	perf_get_x86_pmu_capability(&x86_pmu);

	pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
					 x86_pmu.num_counters_gp);
	eax.split.bit_width = min_t(int, eax.split.bit_width, x86_pmu.bit_width_gp);
	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
	eax.split.mask_length = min_t(int, eax.split.mask_length, x86_pmu.events_mask_len);
	pmu->available_event_types = ~entry->ebx &
					((1ull << eax.split.mask_length) - 1);

	if (pmu->version == 1) {
		pmu->nr_arch_fixed_counters = 0;
	} else {
		pmu->nr_arch_fixed_counters =
			min3(ARRAY_SIZE(fixed_pmc_events),
			     (size_t) edx.split.num_counters_fixed,
			     (size_t) x86_pmu.num_counters_fixed);
		edx.split.bit_width_fixed = min_t(int,
			edx.split.bit_width_fixed, x86_pmu.bit_width_fixed);
		pmu->counter_bitmask[KVM_PMC_FIXED] =
			((u64)1 << edx.split.bit_width_fixed) - 1;
		setup_fixed_pmc_eventsel(pmu);
	}

	pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
	pmu->global_ctrl_mask = ~pmu->global_ctrl;
	pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask
			& ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
			    MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
	if (vmx_pt_mode_is_host_guest())
		pmu->global_ovf_ctrl_mask &=
				~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;

	entry = kvm_find_cpuid_entry(vcpu, 7, 0);
	if (entry &&
	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
	    (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM))) {
		pmu->reserved_bits ^= HSW_IN_TX;
		pmu->raw_event_mask |= (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
	}

	bitmap_set(pmu->all_valid_pmc_idx,
		0, pmu->nr_arch_gp_counters);
	bitmap_set(pmu->all_valid_pmc_idx,
		INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);

	nested_vmx_pmu_refresh(vcpu,
			       intel_is_valid_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL));

	if (intel_pmu_lbr_is_compatible(vcpu))
		x86_perf_get_lbr(&lbr_desc->records);
	else
		lbr_desc->records.nr = 0;

	if (lbr_desc->records.nr)
		bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_IDX_FIXED_VLBR, 1);
}

static void intel_pmu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
		pmu->gp_counters[i].current_config = 0;
	}

	for (i = 0; i < KVM_PMC_MAX_FIXED; i++) {
		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
		pmu->fixed_counters[i].vcpu = vcpu;
		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
		pmu->fixed_counters[i].current_config = 0;
	}

	vcpu->arch.perf_capabilities = vmx_get_perf_capabilities();
	lbr_desc->records.nr = 0;
	lbr_desc->event = NULL;
	lbr_desc->msr_passthrough = false;
}

static void intel_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = NULL;
	int i;

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}

	for (i = 0; i < KVM_PMC_MAX_FIXED; i++) {
		pmc = &pmu->fixed_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = 0;
	}

	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = 0;

	intel_pmu_release_guest_lbr_event(vcpu);
}

/*
 * Emulate LBR_On_PMI behavior for 1 < pmu.version < 4.
 *
 * If Freeze_LBR_On_PMI = 1, the LBR is frozen on PMI and KVM emulates
 * this by clearing the LBR bit (bit 0) in IA32_DEBUGCTL.
 *
 * The guest needs to re-enable LBR to resume branch recording.
 */
static void intel_pmu_legacy_freezing_lbrs_on_pmi(struct kvm_vcpu *vcpu)
{
	u64 data = vmcs_read64(GUEST_IA32_DEBUGCTL);

	if (data & DEBUGCTLMSR_FREEZE_LBRS_ON_PMI) {
		data &= ~DEBUGCTLMSR_LBR;
		vmcs_write64(GUEST_IA32_DEBUGCTL, data);
	}
}

static void intel_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
	u8 version = vcpu_to_pmu(vcpu)->version;

	if (!intel_pmu_lbr_is_enabled(vcpu))
		return;

	if (version > 1 && version < 4)
		intel_pmu_legacy_freezing_lbrs_on_pmi(vcpu);
}

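/* Set or clear interception for every LBR-related MSR of this vCPU. */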
static void vmx_update_intercept_for_lbr_msrs(struct kvm_vcpu *vcpu, bool set)
{
	struct x86_pmu_lbr *lbr = vcpu_to_lbr_records(vcpu);
	int i;

	for (i = 0; i < lbr->nr; i++) {
		vmx_set_intercept_for_msr(vcpu, lbr->from + i, MSR_TYPE_RW, set);
		vmx_set_intercept_for_msr(vcpu, lbr->to + i, MSR_TYPE_RW, set);
		if (lbr->info)
			vmx_set_intercept_for_msr(vcpu, lbr->info + i, MSR_TYPE_RW, set);
	}

	vmx_set_intercept_for_msr(vcpu, MSR_LBR_SELECT, MSR_TYPE_RW, set);
	vmx_set_intercept_for_msr(vcpu, MSR_LBR_TOS, MSR_TYPE_RW, set);
}

static inline void vmx_disable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	if (!lbr_desc->msr_passthrough)
		return;

	vmx_update_intercept_for_lbr_msrs(vcpu, true);
	lbr_desc->msr_passthrough = false;
}

static inline void vmx_enable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	if (lbr_desc->msr_passthrough)
		return;

	vmx_update_intercept_for_lbr_msrs(vcpu, false);
	lbr_desc->msr_passthrough = true;
}

/*
 * Higher priority host perf events (e.g. cpu pinned) could reclaim the
 * pmu resources (e.g. LBR) that were assigned to the guest. This is
 * usually done via ipi calls (more details in perf_install_in_context).
 *
 * Before entering non-root mode (with irqs disabled here), double-check
 * that the pmu features exposed to the guest have not been reclaimed by
 * higher priority host events. Otherwise, disallow the vcpu's access to
 * the reclaimed features.
 */
void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	if (!lbr_desc->event) {
		vmx_disable_lbr_msrs_passthrough(vcpu);
		if (vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR)
			goto warn;
		if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use))
			goto warn;
		return;
	}

	if (lbr_desc->event->state < PERF_EVENT_STATE_ACTIVE) {
		vmx_disable_lbr_msrs_passthrough(vcpu);
		__clear_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
		goto warn;
	} else
		vmx_enable_lbr_msrs_passthrough(vcpu);

	return;

warn:
	pr_warn_ratelimited("kvm: vcpu-%d: fail to passthrough LBR.\n",
		vcpu->vcpu_id);
}

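/* Release the guest LBR event if the guest no longer has DEBUGCTL.LBR set. */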
static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
{
	if (!(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR))
		intel_pmu_release_guest_lbr_event(vcpu);
}

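/* Intel callbacks plugged into the vendor-neutral KVM x86 PMU code. */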
struct kvm_pmu_ops intel_pmu_ops __initdata = {
	.pmc_perf_hw_id = intel_pmc_perf_hw_id,
	.pmc_is_enabled = intel_pmc_is_enabled,
	.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
	.rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = intel_msr_idx_to_pmc,
	.is_valid_rdpmc_ecx = intel_is_valid_rdpmc_ecx,
	.is_valid_msr = intel_is_valid_msr,
	.get_msr = intel_pmu_get_msr,
	.set_msr = intel_pmu_set_msr,
	.refresh = intel_pmu_refresh,
	.init = intel_pmu_init,
	.reset = intel_pmu_reset,
	.deliver_pmi = intel_pmu_deliver_pmi,
	.cleanup = intel_pmu_cleanup,
};