/*
 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
 *
 * Copyright 2015 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 *   Wei Huang    <wei@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

/* NOTE:
 * - Each perf counter is defined as "struct kvm_pmc";
 * - There are two types of perf counters: general purpose (gp) and fixed.
 *   gp counters are stored in gp_counters[] and fixed counters are stored
 *   in fixed_counters[] respectively. Both of them are part of "struct
 *   kvm_pmu";
 * - pmu.c understands the difference between gp counters and fixed counters.
 *   However, AMD doesn't support fixed counters;
 * - There are three types of indices used to access perf counters (PMC):
 *     1. MSR (named msr): For example Intel has MSR_IA32_PERFCTRn and AMD
 *        has MSR_K7_PERFCTRn.
 *     2. MSR Index (named idx): This is normally used by the RDPMC
 *        instruction. For instance, the AMD RDPMC instruction uses
 *        0000_0003h in ECX to access C001_0007h (MSR_K7_PERFCTR3). Intel
 *        has a similar mechanism, except that it also supports fixed
 *        counters. idx can be used as an index into the gp and fixed
 *        counters.
 *     3. Global PMC Index (named pmc): pmc is an index specific to PMU
 *        code. Each pmc, stored in kvm_pmc.idx field, is unique across
 *        all perf counters (both gp and fixed). The mapping relationship
 *        between pmc and perf counters is as follows:
 *        * Intel: [0 .. INTEL_PMC_MAX_GENERIC-1] <=> gp counters
 *                 [INTEL_PMC_IDX_FIXED .. INTEL_PMC_IDX_FIXED + 2] <=> fixed
 *        * AMD:   [0 .. AMD64_NUM_COUNTERS-1] <=> gp counters
 */
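
/*
 * Example: on Intel, a guest RDPMC with ECX = (1u << 30) | 1 uses the "idx"
 * form to select fixed counter 1 (bit 30 selects the fixed-counter space);
 * the counter's global "pmc" index, stored in kvm_pmc.idx, is
 * INTEL_PMC_IDX_FIXED + 1.
 */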

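/*
 * irq_work callback used to deliver a pending PMI from a context where it is
 * safe to do so; see kvm_perf_overflow_intr() for why delivery cannot always
 * happen directly from NMI context.
 */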
static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
{
	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	kvm_pmu_deliver_pmi(vcpu);
}

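/*
 * Overflow handler for counters programmed without PMI delivery: record the
 * overflow in reprogram_pmi and global_status and request KVM_REQ_PMU so that
 * kvm_pmu_handle_event() reprograms the counter.
 */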
static void kvm_perf_overflow(struct perf_event *perf_event,
			      struct perf_sample_data *data,
			      struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (!test_and_set_bit(pmc->idx,
			      (unsigned long *)&pmu->reprogram_pmi)) {
		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
	}
}

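/*
 * Overflow handler for counters programmed with PMI delivery: does the same
 * bookkeeping as kvm_perf_overflow() and additionally injects a PMI into the
 * guest, either directly or via irq_work (see the comment below).
 */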
static void kvm_perf_overflow_intr(struct perf_event *perf_event,
				   struct perf_sample_data *data,
				   struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (!test_and_set_bit(pmc->idx,
			      (unsigned long *)&pmu->reprogram_pmi)) {
		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);

		/*
		 * Inject PMI. If the vcpu was in guest mode during the NMI,
		 * the PMI can be injected on guest mode re-entry. Otherwise
		 * we can't be sure that the vcpu wasn't executing a hlt
		 * instruction at the time of the vmexit and is not going to
		 * re-enter guest mode until woken up. So we should wake it,
		 * but this is impossible from NMI context. Do it from irq
		 * work instead.
		 */
		if (!kvm_is_in_guest())
			irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
		else
			kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
	}
}

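/*
 * Back a guest counter with a host perf_event. The current guest counter
 * value is turned into a sample_period so that the host event overflows
 * exactly when the guest counter would wrap.
 */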
static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
				  unsigned config, bool exclude_user,
				  bool exclude_kernel, bool intr,
				  bool in_tx, bool in_tx_cp)
{
	struct perf_event *event;
	struct perf_event_attr attr = {
		.type = type,
		.size = sizeof(attr),
		.pinned = true,
		.exclude_idle = true,
		.exclude_host = 1,
		.exclude_user = exclude_user,
		.exclude_kernel = exclude_kernel,
		.config = config,
	};

	attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);

	if (in_tx)
		attr.config |= HSW_IN_TX;
	if (in_tx_cp) {
		/*
		 * HSW_IN_TX_CHECKPOINTED is not supported with a nonzero
		 * period. Just clear the sample period so at least
		 * allocating the counter doesn't fail.
		 */
		attr.sample_period = 0;
		attr.config |= HSW_IN_TX_CHECKPOINTED;
	}

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 intr ? kvm_perf_overflow_intr :
						 kvm_perf_overflow, pmc);
	if (IS_ERR(event)) {
		pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n",
			    PTR_ERR(event), pmc->idx);
		return;
	}

	pmc->perf_event = event;
	clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
}

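/*
 * (Re)program a general purpose counter from its event select MSR value. If
 * the event maps to a generic hardware event known to perf, it is programmed
 * as PERF_TYPE_HARDWARE; otherwise a raw event is used.
 */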
void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
{
	unsigned config, type = PERF_TYPE_RAW;
	u8 event_select, unit_mask;

	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
		printk_once("kvm pmu: pin control bit is ignored\n");

	pmc->eventsel = eventsel;

	pmc_stop_counter(pmc);

	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
		return;

	event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
	unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;

	if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
			  ARCH_PERFMON_EVENTSEL_INV |
			  ARCH_PERFMON_EVENTSEL_CMASK |
			  HSW_IN_TX |
			  HSW_IN_TX_CHECKPOINTED))) {
		config = kvm_x86_ops->pmu_ops->find_arch_event(pmc_to_pmu(pmc),
						      event_select,
						      unit_mask);
		if (config != PERF_COUNT_HW_MAX)
			type = PERF_TYPE_HARDWARE;
	}

	if (type == PERF_TYPE_RAW)
		config = eventsel & X86_RAW_EVENT_MASK;

	pmc_reprogram_counter(pmc, type, config,
			      !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
			      !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
			      eventsel & ARCH_PERFMON_EVENTSEL_INT,
			      (eventsel & HSW_IN_TX),
			      (eventsel & HSW_IN_TX_CHECKPOINTED));
}
EXPORT_SYMBOL_GPL(reprogram_gp_counter);

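/*
 * (Re)program a fixed counter from its 4-bit field in the fixed counter
 * control MSR: bits 0-1 select the enabled privilege levels and bit 3
 * requests a PMI on overflow.
 */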
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
{
	unsigned en_field = ctrl & 0x3;
	bool pmi = ctrl & 0x8;

	pmc_stop_counter(pmc);

	if (!en_field || !pmc_is_enabled(pmc))
		return;

	pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
			      kvm_x86_ops->pmu_ops->find_fixed_event(idx),
			      !(en_field & 0x2), /* exclude user */
			      !(en_field & 0x1), /* exclude kernel */
			      pmi, false, false);
}
EXPORT_SYMBOL_GPL(reprogram_fixed_counter);

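/*
 * Reprogram the counter identified by its global pmc index, dispatching to
 * the gp or fixed variant as appropriate.
 */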
void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
{
	struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx);

	if (!pmc)
		return;

	if (pmc_is_gp(pmc))
		reprogram_gp_counter(pmc, pmc->eventsel);
	else {
		int idx = pmc_idx - INTEL_PMC_IDX_FIXED;
		u8 ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, idx);

		reprogram_fixed_counter(pmc, ctrl, idx);
	}
}
EXPORT_SYMBOL_GPL(reprogram_counter);

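/*
 * Handle KVM_REQ_PMU: reprogram every counter whose overflow handler flagged
 * it in reprogram_pmi.
 */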
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	u64 bitmask;
	int bit;

	bitmask = pmu->reprogram_pmi;

	for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
		struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, bit);

		if (unlikely(!pmc || !pmc->perf_event)) {
			clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
			continue;
		}

		reprogram_counter(pmu, bit);
	}
}

/* check if idx is a valid index to access the PMU */
int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
{
	return kvm_x86_ops->pmu_ops->is_valid_msr_idx(vcpu, idx);
}

bool is_vmware_backdoor_pmc(u32 pmc_idx)
{
	switch (pmc_idx) {
	case VMWARE_BACKDOOR_PMC_HOST_TSC:
	case VMWARE_BACKDOOR_PMC_REAL_TIME:
	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
		return true;
	}
	return false;
}

static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	u64 ctr_val;

	switch (idx) {
	case VMWARE_BACKDOOR_PMC_HOST_TSC:
		ctr_val = rdtsc();
		break;
	case VMWARE_BACKDOOR_PMC_REAL_TIME:
		ctr_val = ktime_get_boot_ns();
		break;
	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
		ctr_val = ktime_get_boot_ns() +
			vcpu->kvm->arch.kvmclock_offset;
		break;
	default:
		return 1;
	}

	*data = ctr_val;
	return 0;
}

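/*
 * Emulate guest RDPMC: idx comes from guest ECX; bit 31 selects a "fast" read
 * that returns only the low 32 bits of the counter.
 */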
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	bool fast_mode = idx & (1u << 31);
	struct kvm_pmc *pmc;
	u64 mask = fast_mode ? ~0u : ~0ull;

	if (is_vmware_backdoor_pmc(idx))
		return kvm_pmu_rdpmc_vmware(vcpu, idx, data);

	pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx, &mask);
	if (!pmc)
		return 1;

	*data = pmc_read_counter(pmc) & mask;
	return 0;
}

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
	if (lapic_in_kernel(vcpu))
		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
}

bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	return kvm_x86_ops->pmu_ops->is_valid_msr(vcpu, msr);
}

int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	return kvm_x86_ops->pmu_ops->get_msr(vcpu, msr, data);
}

int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	return kvm_x86_ops->pmu_ops->set_msr(vcpu, msr_info);
}

/* Refresh PMU settings. This function is generally called when the
 * underlying settings change (such as the guest updating its PMU-related
 * CPUID), which should rarely happen.
 */
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->pmu_ops->refresh(vcpu);
}

void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	irq_work_sync(&pmu->irq_work);
	kvm_x86_ops->pmu_ops->reset(vcpu);
}

void kvm_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	memset(pmu, 0, sizeof(*pmu));
	kvm_x86_ops->pmu_ops->init(vcpu);
	init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
	kvm_pmu_refresh(vcpu);
}

void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_pmu_reset(vcpu);
}