/*
 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

static struct kvm_event_hw_type_mapping arch_events[] = {
	/* Index must match CPUID 0x0A.EBX bit vector */
	[0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES  },
	[3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
	[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
	[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
};

/*
 * mapping between fixed pmc index and arch_events array:
 * instructions retired, core cycles, reference cycles
 */
static int fixed_pmc_events[] = {1, 0, 7};

static bool pmc_is_gp(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_GP;
}

static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return pmu->counter_bitmask[pmc->type];
}

static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}

static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
					 u32 base)
{
	if (msr >= base && msr < base + pmu->nr_arch_gp_counters)
		return &pmu->gp_counters[msr - base];
	return NULL;
}

static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
	int base = MSR_CORE_PERF_FIXED_CTR0;
	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters)
		return &pmu->fixed_counters[msr - base];
	return NULL;
}

static inline struct kvm_pmc *get_fixed_pmc_idx(struct kvm_pmu *pmu, int idx)
{
	return get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + idx);
}

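/*
 * Map a global counter index (a bit position in MSR_CORE_PERF_GLOBAL_CTRL)
 * to its kvm_pmc: indices below INTEL_PMC_IDX_FIXED are general-purpose
 * counters, the rest are fixed counters.
 */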
static struct kvm_pmc *global_idx_to_pmc(struct kvm_pmu *pmu, int idx)
{
	if (idx < INTEL_PMC_IDX_FIXED)
		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + idx, MSR_P6_EVNTSEL0);
	else
		return get_fixed_pmc_idx(pmu, idx - INTEL_PMC_IDX_FIXED);
}

static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
{
	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	kvm_pmu_deliver_pmi(vcpu);
}

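/*
 * Overflow handler used when no interrupt on overflow was requested: flag the
 * counter in reprogram_pmi and global_status and let kvm_pmu_handle_event()
 * pick it up when KVM_REQ_PMU is processed.
 */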
static void kvm_perf_overflow(struct perf_event *perf_event,
			      struct perf_sample_data *data,
			      struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (!test_and_set_bit(pmc->idx,
			      (unsigned long *)&pmu->reprogram_pmi)) {
		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
	}
}

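/*
 * Overflow handler used when the guest asked for an interrupt on overflow
 * (ARCH_PERFMON_EVENTSEL_INT): same bookkeeping as kvm_perf_overflow(), plus
 * delivery of a PMI to the guest.
 */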
static void kvm_perf_overflow_intr(struct perf_event *perf_event,
				   struct perf_sample_data *data,
				   struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (!test_and_set_bit(pmc->idx,
			      (unsigned long *)&pmu->reprogram_pmi)) {
		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);

		/*
		 * Inject PMI. If the vcpu was in guest mode during the NMI,
		 * the PMI can be injected on the next guest-mode entry.
		 * Otherwise we cannot be sure that the vcpu was not executing
		 * a hlt instruction at the time of the vmexit and will not
		 * re-enter guest mode until it is woken up. So we should wake
		 * it, but that is impossible from NMI context. Do it from irq
		 * work instead.
		 */
		if (!kvm_is_in_guest())
			irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
		else
			kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
	}
}

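/*
 * Return the current value of the counter: the saved software counter plus
 * whatever the backing perf_event has accumulated, truncated to the width of
 * the counter.
 */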
static u64 pmc_read_counter(struct kvm_pmc *pmc)
{
	u64 counter, enabled, running;

	counter = pmc->counter;

	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event,
						 &enabled, &running);

	/* FIXME: Scaling needed? */

	return counter & pmc_bitmask(pmc);
}

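/*
 * Stop the counter: fold the perf_event's count back into pmc->counter so the
 * value survives until the counter is reprogrammed, then release the event.
 */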
static void pmc_stop_counter(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		pmc->counter = pmc_read_counter(pmc);
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
	}
}

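/*
 * (Re)create the host perf_event backing this counter. The sample period is
 * chosen so that the host event overflows exactly when the guest counter
 * would wrap.
 */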
static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
				  unsigned config, bool exclude_user,
				  bool exclude_kernel, bool intr,
				  bool in_tx, bool in_tx_cp)
{
	struct perf_event *event;
	struct perf_event_attr attr = {
		.type = type,
		.size = sizeof(attr),
		.pinned = true,
		.exclude_idle = true,
		.exclude_host = 1,
		.exclude_user = exclude_user,
		.exclude_kernel = exclude_kernel,
		.config = config,
	};

	if (in_tx)
		attr.config |= HSW_IN_TX;
	if (in_tx_cp)
		attr.config |= HSW_IN_TX_CHECKPOINTED;

	attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 intr ? kvm_perf_overflow_intr :
						 kvm_perf_overflow, pmc);
	if (IS_ERR(event)) {
		printk_once("kvm_pmu: event creation failed %ld\n",
			    PTR_ERR(event));
		return;
	}

	pmc->perf_event = event;
	clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
}

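/*
 * Translate an event_select/unit_mask pair into a generic perf event type,
 * provided the corresponding architectural event is marked available by the
 * guest's CPUID 0x0A.EBX bit vector. Returns PERF_COUNT_HW_MAX if there is no
 * match.
 */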
static unsigned find_arch_event(struct kvm_pmu *pmu, u8 event_select,
		u8 unit_mask)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(arch_events); i++)
		if (arch_events[i].eventsel == event_select
				&& arch_events[i].unit_mask == unit_mask
				&& (pmu->available_event_types & (1 << i)))
			break;

	if (i == ARRAY_SIZE(arch_events))
		return PERF_COUNT_HW_MAX;

	return arch_events[i].event_type;
}

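/*
 * Program a general-purpose counter from its EVNTSEL MSR value. Events that
 * map cleanly onto an architectural event are requested as
 * PERF_TYPE_HARDWARE; anything else is handed to perf as a raw event.
 */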
static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
{
	unsigned config, type = PERF_TYPE_RAW;
	u8 event_select, unit_mask;

	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
		printk_once("kvm pmu: pin control bit is ignored\n");

	pmc->eventsel = eventsel;

	pmc_stop_counter(pmc);

	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
		return;

	event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
	unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;

	if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
			  ARCH_PERFMON_EVENTSEL_INV |
			  ARCH_PERFMON_EVENTSEL_CMASK |
			  HSW_IN_TX |
			  HSW_IN_TX_CHECKPOINTED))) {
		config = find_arch_event(pmc_to_pmu(pmc), event_select,
				unit_mask);
		if (config != PERF_COUNT_HW_MAX)
			type = PERF_TYPE_HARDWARE;
	}

	if (type == PERF_TYPE_RAW)
		config = eventsel & X86_RAW_EVENT_MASK;

	pmc_reprogram_counter(pmc, type, config,
			      !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
			      !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
			      eventsel & ARCH_PERFMON_EVENTSEL_INT,
			      (eventsel & HSW_IN_TX),
			      (eventsel & HSW_IN_TX_CHECKPOINTED));
}

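/*
 * Program a fixed counter from its 4-bit control field: bits 0-1 select the
 * privilege levels to count (OS/USR) and bit 3 requests a PMI on overflow.
 */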
static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
{
	unsigned en_field = ctrl & 0x3;
	bool pmi = ctrl & 0x8;

	pmc_stop_counter(pmc);

	if (!en_field || !pmc_is_enabled(pmc))
		return;

	pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
			      arch_events[fixed_pmc_events[idx]].event_type,
			      !(en_field & 0x2), /* exclude user */
			      !(en_field & 0x1), /* exclude kernel */
			      pmi, false, false);
}

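/* Extract the 4-bit control field for fixed counter idx from FIXED_CTR_CTRL. */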
static inline u8 fixed_ctrl_field(u64 ctrl, int idx)
{
	return (ctrl >> (idx * 4)) & 0xf;
}

static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
	int i;

	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
		u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
		u8 new_ctrl = fixed_ctrl_field(data, i);
		struct kvm_pmc *pmc = get_fixed_pmc_idx(pmu, i);

		if (old_ctrl == new_ctrl)
			continue;

		reprogram_fixed_counter(pmc, new_ctrl, i);
	}

	pmu->fixed_ctr_ctrl = data;
}

static void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
{
	struct kvm_pmc *pmc = global_idx_to_pmc(pmu, pmc_idx);

	if (!pmc)
		return;

	if (pmc_is_gp(pmc))
		reprogram_gp_counter(pmc, pmc->eventsel);
	else {
		int idx = pmc_idx - INTEL_PMC_IDX_FIXED;
		u8 ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, idx);

		reprogram_fixed_counter(pmc, ctrl, idx);
	}
}

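/*
 * MSR_CORE_PERF_GLOBAL_CTRL was written: reprogram every counter whose enable
 * bit changed.
 */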
static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
	int bit;
	u64 diff = pmu->global_ctrl ^ data;

	pmu->global_ctrl = data;

	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		reprogram_counter(pmu, bit);
}

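/*
 * Called when KVM_REQ_PMU is pending: reprogram every counter whose bit is
 * set in reprogram_pmi, i.e. counters flagged by the overflow handlers.
 */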
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	u64 bitmask;
	int bit;

	bitmask = pmu->reprogram_pmi;

	for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
		struct kvm_pmc *pmc = global_idx_to_pmc(pmu, bit);

		if (unlikely(!pmc || !pmc->perf_event)) {
			clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
			continue;
		}

		reprogram_counter(pmu, bit);
	}
}

/* check if idx is a valid index to access PMU; returns non-zero if it is not */
int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);
	idx &= ~(3u << 30);
	return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
		(fixed && idx >= pmu->nr_arch_fixed_counters);
}

static struct kvm_pmc *kvm_pmu_msr_idx_to_pmc(struct kvm_vcpu *vcpu,
                                            unsigned idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);
	struct kvm_pmc *counters;

	idx &= ~(3u << 30);
	if (!fixed && idx >= pmu->nr_arch_gp_counters)
		return NULL;
	if (fixed && idx >= pmu->nr_arch_fixed_counters)
		return NULL;
	counters = fixed ? pmu->fixed_counters : pmu->gp_counters;

	return &counters[idx];
}

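/*
 * Emulate RDPMC: ECX bit 30 selects the fixed counters, bit 31 requests
 * "fast" mode, in which only the low 32 bits of the counter are returned.
 */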
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	bool fast_mode = idx & (1u << 31);
	struct kvm_pmc *pmc;
	u64 ctr_val;

	pmc = kvm_pmu_msr_idx_to_pmc(vcpu, idx);
	if (!pmc)
		return 1;

	ctr_val = pmc_read_counter(pmc);
	if (fast_mode)
		ctr_val = (u32)ctr_val;

	*data = ctr_val;
	return 0;
}

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.apic)
		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
}

bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int ret;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_CORE_PERF_GLOBAL_CTRL:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		ret = pmu->version > 1;
		break;
	default:
		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)
			|| get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0)
			|| get_fixed_pmc(pmu, msr);
		break;
	}
	return ret;
}

int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	switch (index) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		*data = pmu->fixed_ctr_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		*data = pmu->global_status;
		return 0;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		*data = pmu->global_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		*data = pmu->global_ovf_ctrl;
		return 0;
	default:
		if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
				(pmc = get_fixed_pmc(pmu, index))) {
			*data = pmc_read_counter(pmc);
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
			*data = pmc->eventsel;
			return 0;
		}
	}
	return 1;
}

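/*
 * Handle writes to the PMU MSRs. Writes that touch reserved bits are rejected
 * by returning non-zero.
 */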
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 index = msr_info->index;
	u64 data = msr_info->data;

	switch (index) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		if (pmu->fixed_ctr_ctrl == data)
			return 0;
		if (!(data & 0xfffffffffffff444ull)) {
			reprogram_fixed_counters(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		if (msr_info->host_initiated) {
			pmu->global_status = data;
			return 0;
		}
		break; /* RO MSR */
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (pmu->global_ctrl == data)
			return 0;
		if (!(data & pmu->global_ctrl_mask)) {
			global_ctrl_changed(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		if (!(data & (pmu->global_ctrl_mask & ~(3ull<<62)))) {
			if (!msr_info->host_initiated)
				pmu->global_status &= ~data;
			pmu->global_ovf_ctrl = data;
			return 0;
		}
		break;
	default:
		if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
				(pmc = get_fixed_pmc(pmu, index))) {
			if (!msr_info->host_initiated)
				data = (s64)(s32)data;
			pmc->counter += data - pmc_read_counter(pmc);
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
			if (data == pmc->eventsel)
				return 0;
			if (!(data & pmu->reserved_bits)) {
				reprogram_gp_counter(pmc, data);
				return 0;
			}
		}
	}
	return 1;
}

/*
 * Refresh PMU settings. This function is generally called when the
 * underlying settings change (such as the guest's PMU CPUID leaf), which
 * should rarely happen.
 */
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_cpuid_entry2 *entry;
	union cpuid10_eax eax;
	union cpuid10_edx edx;

	pmu->nr_arch_gp_counters = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->counter_bitmask[KVM_PMC_GP] = 0;
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->version = 0;
	pmu->reserved_bits = 0xffffffff00200000ull;

	entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
	if (!entry)
		return;
	eax.full = entry->eax;
	edx.full = entry->edx;

	pmu->version = eax.split.version_id;
	if (!pmu->version)
		return;

	pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
					INTEL_PMC_MAX_GENERIC);
	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
	pmu->available_event_types = ~entry->ebx &
					((1ull << eax.split.mask_length) - 1);

	if (pmu->version == 1) {
		pmu->nr_arch_fixed_counters = 0;
	} else {
		pmu->nr_arch_fixed_counters =
			min_t(int, edx.split.num_counters_fixed,
				INTEL_PMC_MAX_FIXED);
		pmu->counter_bitmask[KVM_PMC_FIXED] =
			((u64)1 << edx.split.bit_width_fixed) - 1;
	}

	pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) |
		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
	pmu->global_ctrl_mask = ~pmu->global_ctrl;

	/*
	 * Allow the HSW_IN_TX/HSW_IN_TX_CHECKPOINTED eventsel bits only if
	 * the host supports TSX and the guest CPUID advertises HLE or RTM
	 * (CPUID.7.0:EBX bits 4 and 11). X86_FEATURE_* values are bit
	 * numbers, not masks, so they cannot be tested against EBX directly.
	 */
	entry = kvm_find_cpuid_entry(vcpu, 7, 0);
	if (entry &&
	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
	    (entry->ebx & ((1u << 4) | (1u << 11))))
		pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
}

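/* Stop all counters, release their perf_events and clear the PMU MSR state. */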
void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	irq_work_sync(&pmu->irq_work);
	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		struct kvm_pmc *pmc = &pmu->gp_counters[i];
		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++)
		pmc_stop_counter(&pmu->fixed_counters[i]);

	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
		pmu->global_ovf_ctrl = 0;
}

void kvm_pmu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	memset(pmu, 0, sizeof(*pmu));
	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
	}
	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
		pmu->fixed_counters[i].vcpu = vcpu;
		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
	}
	init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
	kvm_pmu_refresh(vcpu);
}

void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_pmu_reset(vcpu);
}