#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/amd-iommu.h>
#include <linux/sched.h>
#include <linux/trace_events.h>
#include <linux/slab.h>
#include <linux/hashtable.h>
#include <linux/frame.h>
#include <linux/psp-sev.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/rwsem.h>

#include <asm/apic.h>
#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/irq_remapping.h>
#include <asm/mce.h>
#include <asm/spec-ctrl.h>
#include <asm/cpu_device_id.h>

#include <asm/virtext.h>
#include "trace.h"

#include "svm.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#ifdef MODULE
static const struct x86_cpu_id svm_cpu_id[] = {
	X86_MATCH_FEATURE(X86_FEATURE_SVM, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
#endif

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

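/*
 * SVM feature bits, matching their positions in CPUID Fn8000_000A_EDX as
 * documented in the AMD APM; only the subset this file cares about is
 * listed here.
 */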
#define SVM_FEATURE_LBRV           (1 <<  1)
#define SVM_FEATURE_SVML           (1 <<  2)
#define SVM_FEATURE_TSC_RATE       (1 <<  4)
#define SVM_FEATURE_VMCB_CLEAN     (1 <<  5)
#define SVM_FEATURE_FLUSH_ASID     (1 <<  6)
#define SVM_FEATURE_DECODE_ASSIST  (1 <<  7)
#define SVM_FEATURE_PAUSE_FILTER   (1 << 10)

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

#define TSC_RATIO_RSVD          0xffffff0000000000ULL
#define TSC_RATIO_MIN		0x0000000000000001ULL
#define TSC_RATIO_MAX		0x000000ffffffffffULL

static bool erratum_383_found __read_mostly;

u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;

/*
 * Set osvw_len to a higher value when updated Revision Guides are
 * published and we know what the new status bits are.
 */
static uint64_t osvw_len = 4, osvw_status;

static DEFINE_PER_CPU(u64, current_tsc_ratio);
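/*
 * MSR_AMD64_TSC_RATIO is a fixed-point value with 32 fractional bits
 * (kvm_tsc_scaling_ratio_frac_bits is set to 32 accordingly in
 * svm_hardware_setup()), so 0x0100000000 is a ratio of exactly 1.0:
 * the guest TSC runs at the host TSC frequency.
 */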
#define TSC_RATIO_DEFAULT	0x0100000000ULL

static const struct svm_direct_access_msrs {
	u32 index;   /* Index of the MSR */
	bool always; /* True if intercept is always on */
} direct_access_msrs[] = {
	{ .index = MSR_STAR,				.always = true  },
	{ .index = MSR_IA32_SYSENTER_CS,		.always = true  },
#ifdef CONFIG_X86_64
	{ .index = MSR_GS_BASE,				.always = true  },
	{ .index = MSR_FS_BASE,				.always = true  },
	{ .index = MSR_KERNEL_GS_BASE,			.always = true  },
	{ .index = MSR_LSTAR,				.always = true  },
	{ .index = MSR_CSTAR,				.always = true  },
	{ .index = MSR_SYSCALL_MASK,			.always = true  },
#endif
	{ .index = MSR_IA32_SPEC_CTRL,			.always = false },
	{ .index = MSR_IA32_PRED_CMD,			.always = false },
	{ .index = MSR_IA32_LASTBRANCHFROMIP,		.always = false },
	{ .index = MSR_IA32_LASTBRANCHTOIP,		.always = false },
	{ .index = MSR_IA32_LASTINTFROMIP,		.always = false },
	{ .index = MSR_IA32_LASTINTTOIP,		.always = false },
	{ .index = MSR_INVALID,				.always = false },
};

/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
bool npt_enabled = true;
#else
bool npt_enabled;
#endif

/*
 * These two parameters are used to configure the controls for Pause-Loop
 * Exiting:
 * pause_filter_count: On processors that support Pause filtering (indicated
 *	by CPUID Fn8000_000A_EDX), the VMCB provides a 16-bit pause filter
 *	count value. On VMRUN this value is loaded into an internal counter.
 *	Each time a pause instruction is executed, this counter is decremented
 *	until it reaches zero, at which point a #VMEXIT is generated if the
 *	pause intercept is enabled. Refer to AMD APM Vol 2 Section 15.14.4
 *	Pause Intercept Filtering for more details.
 *	This value also indicates whether PLE logic is enabled.
 *
 * pause_filter_thresh: In addition, some processor families support advanced
 *	pause filtering (indicated by CPUID Fn8000_000A_EDX), which places an
 *	upper bound on the amount of time a guest is allowed to execute in a
 *	pause loop. In this mode, a 16-bit pause filter threshold field is
 *	added to the VMCB. The threshold value is a cycle count that is used
 *	to reset the pause counter. As with simple pause filtering, VMRUN
 *	loads the pause count value from the VMCB into an internal counter.
 *	Then, on each pause instruction the hardware checks the elapsed number
 *	of cycles since the most recent pause instruction against the pause
 *	filter threshold. If the elapsed cycle count is greater than the pause
 *	filter threshold, then the internal pause count is reloaded from the
 *	VMCB and execution continues. If the elapsed cycle count is less than
 *	the pause filter threshold, then the internal pause count is
 *	decremented. If the count value is less than zero and PAUSE intercept
 *	is enabled, a #VMEXIT is triggered. If advanced pause filtering is
 *	supported and the pause filter threshold field is set to zero, the
 *	filter will operate in the simpler, count-only mode.
 */

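/*
 * Worked example, assuming the defaults below expand to the usual KVM
 * values (KVM_DEFAULT_PLE_GAP = 128, KVM_SVM_DEFAULT_PLE_WINDOW = 3000):
 * a PAUSE retiring within 128 cycles of the previous one is treated as
 * part of a spin loop and decrements the internal counter; once 3000 such
 * closely-spaced PAUSEs accumulate, a #VMEXIT is taken so KVM can yield
 * the physical CPU to a vCPU that has useful work to do.
 */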
static unsigned short pause_filter_thresh = KVM_DEFAULT_PLE_GAP;
module_param(pause_filter_thresh, ushort, 0444);

static unsigned short pause_filter_count = KVM_SVM_DEFAULT_PLE_WINDOW;
module_param(pause_filter_count, ushort, 0444);

/* Default doubles per-vcpu window every exit. */
static unsigned short pause_filter_count_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
module_param(pause_filter_count_grow, ushort, 0444);

/* Default resets per-vcpu window every exit to pause_filter_count. */
static unsigned short pause_filter_count_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
module_param(pause_filter_count_shrink, ushort, 0444);

/* Default is to compute the maximum so we can never overflow. */
static unsigned short pause_filter_count_max = KVM_SVM_DEFAULT_PLE_WINDOW_MAX;
module_param(pause_filter_count_max, ushort, 0444);

/* allow nested paging (virtualized MMU) for all guests */
static int npt = true;
module_param(npt, int, S_IRUGO);

/* allow nested virtualization in KVM/SVM */
static int nested = true;
module_param(nested, int, S_IRUGO);

/* enable/disable Next RIP Save */
static int nrips = true;
module_param(nrips, int, 0444);

/* enable/disable Virtual VMLOAD VMSAVE */
static int vls = true;
module_param(vls, int, 0444);

/* enable/disable Virtual GIF */
static int vgif = true;
module_param(vgif, int, 0444);

/* enable/disable SEV support */
static int sev = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT);
module_param(sev, int, 0444);

static bool __read_mostly dump_invalid_vmcb = 0;
module_param(dump_invalid_vmcb, bool, 0644);

static u8 rsm_ins_bytes[] = "\x0f\xaa";

static void svm_complete_interrupts(struct vcpu_svm *svm);

static unsigned long iopm_base;

struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1:8, type:5, dpl:2, p:1;
	unsigned limit1:4, zero0:3, g:1, base2:8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);

static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

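/*
 * Each 2048-byte range of the MSR permission map therefore covers
 * 2048 * 8 / 2 = 8192 MSRs, since every MSR consumes two bits (one read
 * intercept bit and one write intercept bit).
 */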
u32 svm_msrpm_offset(u32 msr)
{
	u32 offset;
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr < msrpm_ranges[i] ||
		    msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
			continue;

		offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8 */
		offset += (i * MSRS_RANGE_SIZE);       /* add range offset */

		/* Now we have the u8 offset - but need the u32 offset */
		return offset / 4;
	}

	/* MSR not in any range */
	return MSR_INVALID;
}

#define MAX_INST_SIZE 15

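/*
 * CLGI and STGI toggle the Global Interrupt Flag. While GIF is clear,
 * physical interrupts and most other asynchronous events are held
 * pending, which lets the host swap guest/host state around VMRUN
 * without being interrupted.
 */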
static inline void clgi(void)
{
	asm volatile (__ex("clgi"));
}

static inline void stgi(void)
{
	asm volatile (__ex("stgi"));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (__ex("invlpga %1, %0") : : "c"(asid), "a"(addr));
}

static int get_npt_level(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return PT64_ROOT_4LEVEL;
#else
	return PT32E_ROOT_LEVEL;
#endif
}

void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	vcpu->arch.efer = efer;

	if (!npt_enabled) {
		/* Shadow paging assumes NX to be available.  */
		efer |= EFER_NX;

		if (!(efer & EFER_LMA))
			efer &= ~EFER_LME;
	}

	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
}

static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 ret = 0;

	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
		ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
	return ret;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (mask == 0)
		svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
	else
		svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
}

static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (nrips && svm->vmcb->control.next_rip != 0) {
		WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
		svm->next_rip = svm->vmcb->control.next_rip;
	}

	if (!svm->next_rip) {
		if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
			return 0;
	} else {
		kvm_rip_write(vcpu, svm->next_rip);
	}
	svm_set_interrupt_shadow(vcpu, 0);

	return 1;
}

static void svm_queue_exception(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	unsigned nr = vcpu->arch.exception.nr;
	bool has_error_code = vcpu->arch.exception.has_error_code;
	bool reinject = vcpu->arch.exception.injected;
	u32 error_code = vcpu->arch.exception.error_code;

	/*
	 * If we are within a nested VM we'd better #VMEXIT and let the guest
	 * handle the exception
	 */
	if (!reinject &&
	    nested_svm_check_exception(svm, nr, has_error_code, error_code))
		return;

	kvm_deliver_exception_payload(&svm->vcpu);

	if (nr == BP_VECTOR && !nrips) {
		unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);

		/*
		 * For guest debugging where we have to reinject #BP if some
		 * INT3 is guest-owned:
		 * Emulate nRIP by moving RIP forward. Will fail if injection
		 * raises a fault that is not intercepted. Still better than
		 * failing in all cases.
		 */
		(void)skip_emulated_instruction(&svm->vcpu);
		rip = kvm_rip_read(&svm->vcpu);
		svm->int3_rip = rip + svm->vmcb->save.cs.base;
		svm->int3_injected = rip - old_rip;
	}

	svm->vmcb->control.event_inj = nr
		| SVM_EVTINJ_VALID
		| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
		| SVM_EVTINJ_TYPE_EXEPT;
	svm->vmcb->control.event_inj_err = error_code;
}

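/*
 * Per the published workaround for AMD erratum 383, bit 47 of
 * MSR_AMD64_DC_CFG is set on affected CPUs; see is_erratum_383() and
 * svm_handle_mce() below for the detection side. The _safe MSR accessors
 * are used so this also works when running nested, where the MSR may not
 * be implemented.
 */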
static void svm_init_erratum_383(void)
{
	u32 low, high;
	int err;
	u64 val;

	if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
		return;

	/* Use _safe variants to not break nested virtualization */
	val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
	if (err)
		return;

	val |= (1ULL << 47);

	low  = lower_32_bits(val);
	high = upper_32_bits(val);

	native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);

	erratum_383_found = true;
}

static void svm_init_osvw(struct kvm_vcpu *vcpu)
{
	/*
	 * Guests should see errata 400 and 415 as fixed (assuming that
	 * HLT and IO instructions are intercepted).
	 */
	vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
	vcpu->arch.osvw.status = osvw_status & ~(6ULL);

	/*
	 * By increasing VCPU's osvw.length to 3 we are telling the guest that
	 * all osvw.status bits inside that length, including bit 0 (which is
	 * reserved for erratum 298), are valid. However, if the host
	 * processor's osvw_len is 0 then osvw_status[0] carries no
	 * information. We need to be conservative here and therefore we tell
	 * the guest that erratum 298 is present (because we really don't
	 * know).
	 */
	if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
		vcpu->arch.osvw.status |= 1;
}

static int has_svm(void)
{
	const char *msg;

	if (!cpu_has_svm(&msg)) {
		printk(KERN_INFO "has_svm: %s\n", msg);
		return 0;
	}

	return 1;
}

static void svm_hardware_disable(void)
{
	/* Make sure we clean up behind us */
	if (static_cpu_has(X86_FEATURE_TSCRATEMSR))
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);

	cpu_svm_disable();

	amd_pmu_disable_virt();
}

static int svm_hardware_enable(void)
{
	struct svm_cpu_data *sd;
	uint64_t efer;
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	rdmsrl(MSR_EFER, efer);
	if (efer & EFER_SVME)
		return -EBUSY;

	if (!has_svm()) {
		pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me);
		return -EINVAL;
	}
	sd = per_cpu(svm_data, me);
	if (!sd) {
		pr_err("%s: svm_data is NULL on %d\n", __func__, me);
		return -EINVAL;
	}

	sd->asid_generation = 1;
	sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	sd->next_asid = sd->max_asid + 1;
	sd->min_asid = max_sev_asid + 1;

	gdt = get_current_gdt_rw();
	sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	wrmsrl(MSR_EFER, efer | EFER_SVME);

	wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);

	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
		__this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT);
	}

	/*
	 * Get OSVW bits.
	 *
	 * Note that it is possible to have a system with mixed processor
	 * revisions and therefore different OSVW bits. If bits are not the same
	 * on different processors then choose the worst case (i.e. if erratum
	 * is present on one processor and not on another then assume that the
	 * erratum is present everywhere).
	 */
	if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
		uint64_t len, status = 0;
		int err;

		len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
		if (!err)
			status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
						      &err);

		if (err)
			osvw_status = osvw_len = 0;
		else {
			if (len < osvw_len)
				osvw_len = len;
			osvw_status |= status;
			osvw_status &= (1ULL << osvw_len) - 1;
		}
	} else
		osvw_status = osvw_len = 0;

	svm_init_erratum_383();

	amd_pmu_enable_virt();

	return 0;
}

static void svm_cpu_uninit(int cpu)
{
	struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());

	if (!sd)
		return;

	per_cpu(svm_data, raw_smp_processor_id()) = NULL;
	kfree(sd->sev_vmcbs);
	__free_page(sd->save_area);
	kfree(sd);
}

static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *sd;

	sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!sd)
		return -ENOMEM;
	sd->cpu = cpu;
	sd->save_area = alloc_page(GFP_KERNEL);
	if (!sd->save_area)
		goto free_cpu_data;

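	/*
	 * For SEV, track the VMCB that last ran with each ASID on this CPU
	 * (one slot per SEV ASID). This is likely consumed by the SEV run
	 * path to detect an ASID being reused by a different VMCB, which
	 * requires a TLB flush.
	 */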
	if (svm_sev_enabled()) {
		sd->sev_vmcbs = kmalloc_array(max_sev_asid + 1,
					      sizeof(void *),
					      GFP_KERNEL);
		if (!sd->sev_vmcbs)
			goto free_save_area;
	}

	per_cpu(svm_data, cpu) = sd;

	return 0;

free_save_area:
	__free_page(sd->save_area);
free_cpu_data:
	kfree(sd);
	return -ENOMEM;
}

static bool valid_msr_intercept(u32 index)
{
	int i;

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
		if (direct_access_msrs[i].index == index)
			return true;

	return false;
}

static bool msr_write_intercepted(struct kvm_vcpu *vcpu, unsigned msr)
{
	u8 bit_write;
	unsigned long tmp;
	u32 offset;
	u32 *msrpm;

	msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm:
				      to_svm(vcpu)->msrpm;

	offset    = svm_msrpm_offset(msr);
	bit_write = 2 * (msr & 0x0f) + 1;
	tmp       = msrpm[offset];

	BUG_ON(offset == MSR_INVALID);

	return !!test_bit(bit_write,  &tmp);
}

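/*
 * Layout of the permission bitmap: two consecutive bits per MSR, sixteen
 * MSRs per u32 word. Bit 2*n intercepts reads of the n-th MSR within a
 * word's group and bit 2*n+1 intercepts writes; a set bit means
 * "intercept" (the whole bitmap is initialized to 0xff below).
 */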
static void set_msr_interception(u32 *msrpm, unsigned msr,
				 int read, int write)
{
	u8 bit_read, bit_write;
	unsigned long tmp;
	u32 offset;

	/*
	 * If this warning triggers, extend the direct_access_msrs list at
	 * the beginning of the file.
	 */
	WARN_ON(!valid_msr_intercept(msr));

	offset    = svm_msrpm_offset(msr);
	bit_read  = 2 * (msr & 0x0f);
	bit_write = 2 * (msr & 0x0f) + 1;
	tmp       = msrpm[offset];

	BUG_ON(offset == MSR_INVALID);

	read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
	write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);

	msrpm[offset] = tmp;
}

static void svm_vcpu_init_msrpm(u32 *msrpm)
{
	int i;

	memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		if (!direct_access_msrs[i].always)
			continue;

		set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
	}
}

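/*
 * msrpm_offsets[] records the distinct bitmap words touched by any MSR in
 * direct_access_msrs, so code that must visit every configurable MSR
 * permission (e.g. when merging bitmaps for a nested guest) can walk just
 * these offsets instead of scanning the whole two-page bitmap.
 */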
static void add_msr_offset(u32 offset)
{
	int i;

	for (i = 0; i < MSRPM_OFFSETS; ++i) {

		/* Offset already in list? */
		if (msrpm_offsets[i] == offset)
			return;

		/* Slot used by another offset? */
		if (msrpm_offsets[i] != MSR_INVALID)
			continue;

		/* Add offset to list */
		msrpm_offsets[i] = offset;

		return;
	}

	/*
	 * If this BUG triggers the msrpm_offsets table has an overflow. Just
	 * increase MSRPM_OFFSETS in this case.
	 */
	BUG();
}

static void init_msrpm_offsets(void)
{
	int i;

	memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		u32 offset;

		offset = svm_msrpm_offset(direct_access_msrs[i].index);
		BUG_ON(offset == MSR_INVALID);

		add_msr_offset(offset);
	}
}

static void svm_enable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

void disable_nmi_singlestep(struct vcpu_svm *svm)
{
	svm->nmi_singlestep = false;

	if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) {
		/* Clear our flags if they were not set by the guest */
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
			svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
			svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;
	}
}

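/*
 * Adaptive pause-loop exiting: callers elsewhere in KVM widen the pause
 * filter count (up to pause_filter_count_max) while a vCPU keeps taking
 * PAUSE exits, and shrink it back toward pause_filter_count otherwise,
 * trading spurious #VMEXITs against time wasted spinning.
 */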
static void grow_ple_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;
	int old = control->pause_filter_count;

	control->pause_filter_count = __grow_ple_window(old,
							pause_filter_count,
							pause_filter_count_grow,
							pause_filter_count_max);

	if (control->pause_filter_count != old) {
		mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
		trace_kvm_ple_window_update(vcpu->vcpu_id,
					    control->pause_filter_count, old);
	}
}

static void shrink_ple_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;
	int old = control->pause_filter_count;

	control->pause_filter_count =
				__shrink_ple_window(old,
						    pause_filter_count,
						    pause_filter_count_shrink,
						    pause_filter_count);
	if (control->pause_filter_count != old) {
		mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
		trace_kvm_ple_window_update(vcpu->vcpu_id,
					    control->pause_filter_count, old);
	}
}

/*
 * The default MMIO mask is a single bit (excluding the present bit),
 * which could conflict with the memory encryption bit. Check for
 * memory encryption support and override the default MMIO mask if
 * memory encryption is enabled.
 */
static __init void svm_adjust_mmio_mask(void)
{
	unsigned int enc_bit, mask_bit;
	u64 msr, mask;

	/* If there is no memory encryption support, use existing mask */
	if (cpuid_eax(0x80000000) < 0x8000001f)
		return;

	/* If memory encryption is not enabled, use existing mask */
	rdmsrl(MSR_K8_SYSCFG, msr);
	if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
		return;

	enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
	mask_bit = boot_cpu_data.x86_phys_bits;

	/* Increment the mask bit if it is the same as the encryption bit */
	if (enc_bit == mask_bit)
		mask_bit++;

	/*
	 * If the mask bit location is below 52, then some bits above the
	 * physical addressing limit will always be reserved, so use the
	 * rsvd_bits() function to generate the mask. This mask, along with
	 * the present bit, will be used to generate a page fault with
	 * PFER.RSV = 1.
	 *
	 * If the mask bit location is 52 (or above), then clear the mask.
	 */
	mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;

	kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK);
}

static void svm_hardware_teardown(void)
{
	int cpu;

	if (svm_sev_enabled())
		sev_hardware_teardown();

	for_each_possible_cpu(cpu)
		svm_cpu_uninit(cpu);

	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
	iopm_base = 0;
}

static __init void svm_set_cpu_caps(void)
{
	kvm_set_cpu_caps();

	supported_xss = 0;

	/* CPUID 0x80000001 and 0x8000000A (SVM features) */
	if (nested) {
		kvm_cpu_cap_set(X86_FEATURE_SVM);

		if (nrips)
			kvm_cpu_cap_set(X86_FEATURE_NRIPS);

		if (npt_enabled)
			kvm_cpu_cap_set(X86_FEATURE_NPT);
	}

	/* CPUID 0x80000008 */
	if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) ||
	    boot_cpu_has(X86_FEATURE_AMD_SSBD))
		kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);
}

static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	void *iopm_va;
	int r;

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	init_msrpm_offsets();

	supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
		kvm_enable_efer_bits(EFER_FFXSR);

	if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		kvm_has_tsc_control = true;
		kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
		kvm_tsc_scaling_ratio_frac_bits = 32;
	}

	/* Check for pause filtering support */
	if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
		pause_filter_count = 0;
		pause_filter_thresh = 0;
	} else if (!boot_cpu_has(X86_FEATURE_PFTHRESHOLD)) {
		pause_filter_thresh = 0;
	}

	if (nested) {
		printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
		kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
	}

	if (sev) {
		if (boot_cpu_has(X86_FEATURE_SEV) &&
		    IS_ENABLED(CONFIG_KVM_AMD_SEV)) {
			r = sev_hardware_setup();
			if (r)
				sev = false;
		} else {
			sev = false;
		}
	}

	svm_adjust_mmio_mask();

	for_each_possible_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err;
	}

	if (!boot_cpu_has(X86_FEATURE_NPT))
		npt_enabled = false;

	if (npt_enabled && !npt)
		npt_enabled = false;

	kvm_configure_mmu(npt_enabled, PT_PDPE_LEVEL);
	pr_info("kvm: Nested Paging %sabled\n", npt_enabled ? "en" : "dis");

	if (nrips) {
		if (!boot_cpu_has(X86_FEATURE_NRIPS))
			nrips = false;
	}

	if (avic) {
		if (!npt_enabled ||
		    !boot_cpu_has(X86_FEATURE_AVIC) ||
		    !IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
			avic = false;
		} else {
			pr_info("AVIC enabled\n");

			amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
		}
	}

	if (vls) {
		if (!npt_enabled ||
		    !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) ||
		    !IS_ENABLED(CONFIG_X86_64)) {
			vls = false;
		} else {
			pr_info("Virtual VMLOAD VMSAVE supported\n");
		}
	}

	if (vgif) {
		if (!boot_cpu_has(X86_FEATURE_VGIF))
			vgif = false;
		else
			pr_info("Virtual GIF supported\n");
	}

	svm_set_cpu_caps();

	return 0;

err:
	svm_hardware_teardown();
	return r;
}

static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}

static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 g_tsc_offset = 0;

	if (is_guest_mode(vcpu)) {
		/* Write L1's TSC offset.  */
		g_tsc_offset = svm->vmcb->control.tsc_offset -
			       svm->nested.hsave->control.tsc_offset;
		svm->nested.hsave->control.tsc_offset = offset;
	}

	trace_kvm_write_tsc_offset(vcpu->vcpu_id,
				   svm->vmcb->control.tsc_offset - g_tsc_offset,
				   offset);

	svm->vmcb->control.tsc_offset = offset + g_tsc_offset;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
	return svm->vmcb->control.tsc_offset;
}

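/*
 * The VMCB is a single page with two halves: a control area (intercept
 * vectors, TLB/ASID control, event injection, nested paging control) and
 * a save area (guest register and segment state). init_vmcb() programs
 * KVM's intercept policy plus the architectural reset state of the vCPU.
 */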
static void init_vmcb(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct vmcb_save_area *save = &svm->vmcb->save;

	svm->vcpu.arch.hflags = 0;

	set_cr_intercept(svm, INTERCEPT_CR0_READ);
	set_cr_intercept(svm, INTERCEPT_CR3_READ);
	set_cr_intercept(svm, INTERCEPT_CR4_READ);
	set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
	set_cr_intercept(svm, INTERCEPT_CR3_WRITE);
	set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
	if (!kvm_vcpu_apicv_active(&svm->vcpu))
		set_cr_intercept(svm, INTERCEPT_CR8_WRITE);

	set_dr_intercepts(svm);

	set_exception_intercept(svm, PF_VECTOR);
	set_exception_intercept(svm, UD_VECTOR);
	set_exception_intercept(svm, MC_VECTOR);
	set_exception_intercept(svm, AC_VECTOR);
	set_exception_intercept(svm, DB_VECTOR);
	/*
	 * Guest access to VMware backdoor ports could legitimately
	 * trigger #GP because of TSS I/O permission bitmap.
	 * We intercept those #GP and allow access to them anyway
	 * as VMware does.
	 */
	if (enable_vmware_backdoor)
		set_exception_intercept(svm, GP_VECTOR);

	set_intercept(svm, INTERCEPT_INTR);
	set_intercept(svm, INTERCEPT_NMI);
	set_intercept(svm, INTERCEPT_SMI);
	set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
	set_intercept(svm, INTERCEPT_RDPMC);
	set_intercept(svm, INTERCEPT_CPUID);
	set_intercept(svm, INTERCEPT_INVD);
	set_intercept(svm, INTERCEPT_INVLPG);
	set_intercept(svm, INTERCEPT_INVLPGA);
	set_intercept(svm, INTERCEPT_IOIO_PROT);
	set_intercept(svm, INTERCEPT_MSR_PROT);
	set_intercept(svm, INTERCEPT_TASK_SWITCH);
	set_intercept(svm, INTERCEPT_SHUTDOWN);
	set_intercept(svm, INTERCEPT_VMRUN);
	set_intercept(svm, INTERCEPT_VMMCALL);
	set_intercept(svm, INTERCEPT_VMLOAD);
	set_intercept(svm, INTERCEPT_VMSAVE);
	set_intercept(svm, INTERCEPT_STGI);
	set_intercept(svm, INTERCEPT_CLGI);
	set_intercept(svm, INTERCEPT_SKINIT);
	set_intercept(svm, INTERCEPT_WBINVD);
	set_intercept(svm, INTERCEPT_XSETBV);
	set_intercept(svm, INTERCEPT_RDPRU);
	set_intercept(svm, INTERCEPT_RSM);

	if (!kvm_mwait_in_guest(svm->vcpu.kvm)) {
		set_intercept(svm, INTERCEPT_MONITOR);
		set_intercept(svm, INTERCEPT_MWAIT);
	}

	if (!kvm_hlt_in_guest(svm->vcpu.kvm))
		set_intercept(svm, INTERCEPT_HLT);

	control->iopm_base_pa = __sme_set(iopm_base);
	control->msrpm_base_pa = __sme_set(__pa(svm->msrpm));
	control->int_ctl = V_INTR_MASKING_MASK;

	init_seg(&save->es);
	init_seg(&save->ss);
	init_seg(&save->ds);
	init_seg(&save->fs);
	init_seg(&save->gs);

	save->cs.selector = 0xf000;
	save->cs.base = 0xffff0000;
	/* Executable/Readable Code Segment */
	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
		SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
	save->cs.limit = 0xffff;

	save->gdtr.limit = 0xffff;
	save->idtr.limit = 0xffff;

	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

	svm_set_efer(&svm->vcpu, 0);
	save->dr6 = 0xffff0ff0;
	kvm_set_rflags(&svm->vcpu, 2);
	save->rip = 0x0000fff0;
	svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;

	/*
	 * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
	 * It also updates the guest-visible cr0 value.
	 */
	svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
	kvm_mmu_reset_context(&svm->vcpu);

	save->cr4 = X86_CR4_PAE;
	/* rdx = ?? */

	if (npt_enabled) {
		/* Setup VMCB for Nested Paging */
		control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
		clr_intercept(svm, INTERCEPT_INVLPG);
		clr_exception_intercept(svm, PF_VECTOR);
		clr_cr_intercept(svm, INTERCEPT_CR3_READ);
		clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
		save->g_pat = svm->vcpu.arch.pat;
		save->cr3 = 0;
		save->cr4 = 0;
	}
	svm->asid_generation = 0;

	svm->nested.vmcb = 0;
	svm->vcpu.arch.hflags = 0;

	if (pause_filter_count) {
		control->pause_filter_count = pause_filter_count;
		if (pause_filter_thresh)
			control->pause_filter_thresh = pause_filter_thresh;
		set_intercept(svm, INTERCEPT_PAUSE);
	} else {
		clr_intercept(svm, INTERCEPT_PAUSE);
	}

	if (kvm_vcpu_apicv_active(&svm->vcpu))
		avic_init_vmcb(svm);

	/*
	 * If hardware supports Virtual VMLOAD VMSAVE then enable it
	 * in VMCB and clear intercepts to avoid #VMEXIT.
	 */
	if (vls) {
		clr_intercept(svm, INTERCEPT_VMLOAD);
		clr_intercept(svm, INTERCEPT_VMSAVE);
		svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
	}

	if (vgif) {
		clr_intercept(svm, INTERCEPT_STGI);
		clr_intercept(svm, INTERCEPT_CLGI);
		svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
	}

	if (sev_guest(svm->vcpu.kvm)) {
		svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
		clr_exception_intercept(svm, UD_VECTOR);
	}

	mark_all_dirty(svm->vmcb);

	enable_gif(svm);
}

static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 dummy;
	u32 eax = 1;

	svm->spec_ctrl = 0;
	svm->virt_spec_ctrl = 0;

	if (!init_event) {
		svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
					   MSR_IA32_APICBASE_ENABLE;
		if (kvm_vcpu_is_reset_bsp(&svm->vcpu))
			svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
	}
	init_vmcb(svm);

	kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy, false);
	kvm_rdx_write(vcpu, eax);

	if (kvm_vcpu_apicv_active(vcpu) && !init_event)
		avic_update_vapic_bar(svm, APIC_DEFAULT_PHYS_BASE);
}

static int svm_create_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm;
	struct page *page;
	struct page *msrpm_pages;
	struct page *hsave_page;
	struct page *nested_msrpm_pages;
	int err;

	BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0);
	svm = to_svm(vcpu);

	err = -ENOMEM;
	page = alloc_page(GFP_KERNEL_ACCOUNT);
	if (!page)
		goto out;

	msrpm_pages = alloc_pages(GFP_KERNEL_ACCOUNT, MSRPM_ALLOC_ORDER);
	if (!msrpm_pages)
		goto free_page1;

	nested_msrpm_pages = alloc_pages(GFP_KERNEL_ACCOUNT, MSRPM_ALLOC_ORDER);
	if (!nested_msrpm_pages)
		goto free_page2;

	hsave_page = alloc_page(GFP_KERNEL_ACCOUNT);
	if (!hsave_page)
		goto free_page3;

	err = avic_init_vcpu(svm);
	if (err)
		goto free_page4;

	/*
	 * We initialize this flag to true to make sure that the is_running
	 * bit will be set the first time the vcpu is loaded.
	 */
	if (irqchip_in_kernel(vcpu->kvm) && kvm_apicv_activated(vcpu->kvm))
		svm->avic_is_running = true;

	svm->nested.hsave = page_address(hsave_page);

	svm->msrpm = page_address(msrpm_pages);
	svm_vcpu_init_msrpm(svm->msrpm);

	svm->nested.msrpm = page_address(nested_msrpm_pages);
	svm_vcpu_init_msrpm(svm->nested.msrpm);

	svm->vmcb = page_address(page);
	clear_page(svm->vmcb);
	svm->vmcb_pa = __sme_set(page_to_pfn(page) << PAGE_SHIFT);
	svm->asid_generation = 0;
	init_vmcb(svm);

	svm_init_osvw(vcpu);
	vcpu->arch.microcode_version = 0x01000065;

	return 0;

free_page4:
	__free_page(hsave_page);
free_page3:
	__free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
free_page2:
	__free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
free_page1:
	__free_page(page);
out:
	return err;
}

static void svm_clear_current_vmcb(struct vmcb *vmcb)
{
	int i;

	for_each_online_cpu(i)
		cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL);
}

static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * The vmcb page can be recycled, causing a false negative in
	 * svm_vcpu_load(). So, ensure that no logical CPU has this
	 * vmcb page recorded as its current vmcb.
	 */
	svm_clear_current_vmcb(svm->vmcb);

	__free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT));
	__free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
	__free_page(virt_to_page(svm->nested.hsave));
	__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
}

static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
	int i;

	if (unlikely(cpu != vcpu->cpu)) {
		svm->asid_generation = 0;
		mark_all_dirty(svm->vmcb);
	}

#ifdef CONFIG_X86_64
	rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
#endif
	savesegment(fs, svm->host.fs);
	savesegment(gs, svm->host.gs);
	svm->host.ldt = kvm_read_ldt();

	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);

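	/*
	 * Writing MSR_AMD64_TSC_RATIO is not cheap, so cache the last value
	 * written per CPU and skip the WRMSR when this vCPU's scaling ratio
	 * already matches it.
	 */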
	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio;
		if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
			__this_cpu_write(current_tsc_ratio, tsc_ratio);
			wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio);
		}
	}
	/* This assumes that the kernel never uses MSR_TSC_AUX */
	if (static_cpu_has(X86_FEATURE_RDTSCP))
		wrmsrl(MSR_TSC_AUX, svm->tsc_aux);

	if (sd->current_vmcb != svm->vmcb) {
		sd->current_vmcb = svm->vmcb;
		indirect_branch_prediction_barrier();
	}
	avic_vcpu_load(vcpu, cpu);
}

static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	avic_vcpu_put(vcpu);

	++vcpu->stat.host_state_reload;
	kvm_load_ldt(svm->host.ldt);
#ifdef CONFIG_X86_64
	loadsegment(fs, svm->host.fs);
	wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gsbase);
	load_gs_index(svm->host.gs);
#else
#ifdef CONFIG_X86_32_LAZY_GS
	loadsegment(gs, svm->host.gs);
#endif
#endif
	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}

static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	unsigned long rflags = svm->vmcb->save.rflags;

	if (svm->nmi_singlestep) {
		/* Hide our flags if they were not set by the guest */
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
			rflags &= ~X86_EFLAGS_TF;
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
			rflags &= ~X86_EFLAGS_RF;
	}
	return rflags;
}

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	if (to_svm(vcpu)->nmi_singlestep)
		rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);

	/*
	 * Any change of EFLAGS.VM is accompanied by a reload of SS
	 * (caused by either a task switch or an inter-privilege IRET),
	 * so we do not need to update the CPL here.
	 */
	to_svm(vcpu)->vmcb->save.rflags = rflags;
}

static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
	switch (reg) {
	case VCPU_EXREG_PDPTR:
		BUG_ON(!npt_enabled);
		load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
		break;
	default:
		WARN_ON_ONCE(1);
	}
}

static inline void svm_enable_vintr(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control;

	/* The following fields are ignored when AVIC is enabled */
	WARN_ON(kvm_vcpu_apicv_active(&svm->vcpu));

	/*
	 * This is just a dummy VINTR to actually cause a vmexit to happen.
	 * Actual injection of virtual interrupts happens through EVENTINJ.
	 */
	control = &svm->vmcb->control;
	control->int_vector = 0x0;
	control->int_ctl &= ~V_INTR_PRIO_MASK;
	control->int_ctl |= V_IRQ_MASK |
		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
	mark_dirty(svm->vmcb, VMCB_INTR);
}

static void svm_set_vintr(struct vcpu_svm *svm)
{
	set_intercept(svm, INTERCEPT_VINTR);
	if (is_intercept(svm, INTERCEPT_VINTR))
		svm_enable_vintr(svm);
}

static void svm_clear_vintr(struct vcpu_svm *svm)
{
	clr_intercept(svm, INTERCEPT_VINTR);

	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
	mark_dirty(svm->vmcb, VMCB_INTR);
}

static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	switch (seg) {
	case VCPU_SREG_CS: return &save->cs;
	case VCPU_SREG_DS: return &save->ds;
	case VCPU_SREG_ES: return &save->es;
	case VCPU_SREG_FS: return &save->fs;
	case VCPU_SREG_GS: return &save->gs;
	case VCPU_SREG_SS: return &save->ss;
	case VCPU_SREG_TR: return &save->tr;
	case VCPU_SREG_LDTR: return &save->ldtr;
	}
	BUG();
	return NULL;
}

static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	return s->base;
}

static void svm_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	var->base = s->base;
	var->limit = s->limit;
	var->selector = s->selector;
	var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
	var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
	var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
	var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
	var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
	var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;

	/*
	 * AMD CPUs circa 2014 track the G bit for all segments except CS.
	 * However, the SVM spec states that the G bit is not observed by the
	 * CPU, and some VMware virtual CPUs drop the G bit for all segments.
	 * So let's synthesize a legal G bit for all segments, this helps
	 * running KVM nested. It also helps cross-vendor migration, because
	 * Intel's vmentry has a check on the 'G' bit.
	 */
	var->g = s->limit > 0xfffff;

	/*
	 * AMD's VMCB does not have an explicit unusable field, so emulate it
	 * for cross vendor migration purposes by "not present"
	 */
	var->unusable = !var->present;

	switch (seg) {
	case VCPU_SREG_TR:
		/*
		 * Work around a bug where the busy flag in the tr selector
		 * isn't exposed
		 */
		var->type |= 0x2;
		break;
	case VCPU_SREG_DS:
	case VCPU_SREG_ES:
	case VCPU_SREG_FS:
	case VCPU_SREG_GS:
		/*
		 * The accessed bit must always be set in the segment
		 * descriptor cache, although it can be cleared in the
		 * descriptor, the cached bit always remains at 1. Since
		 * Intel has a check on this, set it here to support
		 * cross-vendor migration.
		 */
		if (!var->unusable)
			var->type |= 0x1;
		break;
	case VCPU_SREG_SS:
		/*
		 * On AMD CPUs sometimes the DB bit in the segment
		 * descriptor is left as 1, although the whole segment has
		 * been made unusable. Clear it here to pass an Intel VMX
		 * entry check when cross vendor migrating.
		 */
		if (var->unusable)
			var->db = 0;
		/* This is symmetric with svm_set_segment() */
		var->dpl = to_svm(vcpu)->vmcb->save.cpl;
		break;
	}
}

static int svm_get_cpl(struct kvm_vcpu *vcpu)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	return save->cpl;
}

static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->size = svm->vmcb->save.idtr.limit;
	dt->address = svm->vmcb->save.idtr.base;
}

static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.idtr.limit = dt->size;
	svm->vmcb->save.idtr.base = dt->address;
	mark_dirty(svm->vmcb, VMCB_DT);
}

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->size = svm->vmcb->save.gdtr.limit;
	dt->address = svm->vmcb->save.gdtr.base;
}

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.gdtr.limit = dt->size;
	svm->vmcb->save.gdtr.base = dt->address;
	mark_dirty(svm->vmcb, VMCB_DT);
}

static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
{
}

static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}

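/*
 * Sync the shadow CR0 with the guest-visible CR0 and drop the full CR0
 * read/write intercepts when the two agree; the selective CR0 write
 * intercept installed in init_vmcb() still catches the interesting
 * changes. The intercepts are re-established whenever the values diverge.
 */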
static void update_cr0_intercept(struct vcpu_svm *svm)
{
	ulong gcr0 = svm->vcpu.arch.cr0;
	u64 *hcr0 = &svm->vmcb->save.cr0;

	*hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
		| (gcr0 & SVM_CR0_SELECTIVE_MASK);

	mark_dirty(svm->vmcb, VMCB_CR);

	if (gcr0 == *hcr0) {
		clr_cr_intercept(svm, INTERCEPT_CR0_READ);
		clr_cr_intercept(svm, INTERCEPT_CR0_WRITE);
	} else {
		set_cr_intercept(svm, INTERCEPT_CR0_READ);
		set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
	}
}

void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	struct vcpu_svm *svm = to_svm(vcpu);

#ifdef CONFIG_X86_64
	if (vcpu->arch.efer & EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
			vcpu->arch.efer |= EFER_LMA;
			svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
		}

		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
			vcpu->arch.efer &= ~EFER_LMA;
			svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
		}
	}
#endif
	vcpu->arch.cr0 = cr0;

	if (!npt_enabled)
		cr0 |= X86_CR0_PG | X86_CR0_WP;

	/*
	 * re-enable caching here because the QEMU bios
	 * does not do it - this results in some delay at
	 * reboot
	 */
	if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
		cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
	svm->vmcb->save.cr0 = cr0;
	mark_dirty(svm->vmcb, VMCB_CR);
	update_cr0_intercept(svm);
}

int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
	unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;

	if (cr4 & X86_CR4_VMXE)
		return 1;

	if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
		svm_flush_tlb(vcpu);

	vcpu->arch.cr4 = cr4;
	if (!npt_enabled)
		cr4 |= X86_CR4_PAE;
	cr4 |= host_cr4_mce;
	to_svm(vcpu)->vmcb->save.cr4 = cr4;
	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
	return 0;
}

static void svm_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	s->base = var->base;
	s->limit = var->limit;
	s->selector = var->selector;
	s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
	s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
	s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
	s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
	s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
	s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
	s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
	s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;

	/*
	 * This is always accurate, except if SYSRET returned to a segment
	 * with SS.DPL != 3.  Intel does not have this quirk, and always
	 * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it
	 * would entail passing the CPL to userspace and back.
	 */
	if (seg == VCPU_SREG_SS)
		/* This is symmetric with svm_get_segment() */
		svm->vmcb->save.cpl = (var->dpl & 3);

	mark_dirty(svm->vmcb, VMCB_SEG);
}

static void update_bp_intercept(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	clr_exception_intercept(svm, BP_VECTOR);

	if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
			set_exception_intercept(svm, BP_VECTOR);
	} else
		vcpu->guest_debug = 0;
}

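/*
 * ASIDs are handed out from a per-CPU pool. When the pool is exhausted,
 * bump the generation, restart from min_asid, and flush all ASIDs on this
 * CPU; a vCPU whose cached generation no longer matches will pick up a
 * fresh ASID before its next VMRUN.
 */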
static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
{
	if (sd->next_asid > sd->max_asid) {
		++sd->asid_generation;
		sd->next_asid = sd->min_asid;
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}

	svm->asid_generation = sd->asid_generation;
	svm->vmcb->control.asid = sd->next_asid++;

	mark_dirty(svm->vmcb, VMCB_ASID);
}

static void svm_set_dr6(struct vcpu_svm *svm, unsigned long value)
{
	struct vmcb *vmcb = svm->vmcb;

	if (unlikely(value != vmcb->save.dr6)) {
		vmcb->save.dr6 = value;
		mark_dirty(vmcb, VMCB_DR);
	}
}

static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	get_debugreg(vcpu->arch.db[0], 0);
	get_debugreg(vcpu->arch.db[1], 1);
	get_debugreg(vcpu->arch.db[2], 2);
	get_debugreg(vcpu->arch.db[3], 3);
	/*
	 * We cannot reset svm->vmcb->save.dr6 to DR6_FIXED_1|DR6_RTM here,
	 * because db_interception might need it.  We can do it before vmentry.
	 */
	vcpu->arch.dr6 = svm->vmcb->save.dr6;
	vcpu->arch.dr7 = svm->vmcb->save.dr7;
	vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
	set_dr_intercepts(svm);
}

static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.dr7 = value;
	mark_dirty(svm->vmcb, VMCB_DR);
}

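/*
 * #PF intercepts are only armed when shadow paging is in use; with nested
 * paging the guest handles its own #PFs and KVM instead sees NPF exits
 * for nested page table violations (see the PF_VECTOR handling in
 * init_vmcb()).
 */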
A
Avi Kivity 已提交
1699
static int pf_interception(struct vcpu_svm *svm)
A
Avi Kivity 已提交
1700
{
1701
	u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
1702
	u64 error_code = svm->vmcb->control.exit_info_1;
A
Avi Kivity 已提交
1703

1704
	return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
1705 1706
			static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
			svm->vmcb->control.insn_bytes : NULL,
1707 1708 1709 1710 1711
			svm->vmcb->control.insn_len);
}

static int npf_interception(struct vcpu_svm *svm)
{
1712
	u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
1713 1714 1715 1716
	u64 error_code = svm->vmcb->control.exit_info_1;

	trace_kvm_page_fault(fault_address, error_code);
	return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
1717 1718
			static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
			svm->vmcb->control.insn_bytes : NULL,
1719
			svm->vmcb->control.insn_len);
A
Avi Kivity 已提交
1720 1721
}

static int db_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;
	struct kvm_vcpu *vcpu = &svm->vcpu;

	if (!(svm->vcpu.guest_debug &
	      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
		!svm->nmi_singlestep) {
		u32 payload = (svm->vmcb->save.dr6 ^ DR6_RTM) & ~DR6_FIXED_1;
		kvm_queue_exception_p(&svm->vcpu, DB_VECTOR, payload);
		return 1;
	}

	if (svm->nmi_singlestep) {
		disable_nmi_singlestep(svm);
		/* Make sure we check for pending NMIs upon entry */
		kvm_make_request(KVM_REQ_EVENT, vcpu);
	}

	if (svm->vcpu.guest_debug &
	    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
		kvm_run->exit_reason = KVM_EXIT_DEBUG;
		kvm_run->debug.arch.dr6 = svm->vmcb->save.dr6;
		kvm_run->debug.arch.dr7 = svm->vmcb->save.dr7;
		kvm_run->debug.arch.pc =
			svm->vmcb->save.cs.base + svm->vmcb->save.rip;
		kvm_run->debug.arch.exception = DB_VECTOR;
		return 0;
	}

	return 1;
}

static int bp_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;

	kvm_run->exit_reason = KVM_EXIT_DEBUG;
	kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
	kvm_run->debug.arch.exception = BP_VECTOR;
	return 0;
}

static int ud_interception(struct vcpu_svm *svm)
{
	return handle_ud(&svm->vcpu);
}

static int ac_interception(struct vcpu_svm *svm)
{
	kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0);
	return 1;
}

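/*
 * #GP is only intercepted when VMware backdoor emulation is enabled, so
 * that the instructions used by the backdoor (IN{S}, OUT{S}, RDPMC) can
 * be emulated instead of faulting inside the guest.
 */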
static int gp_interception(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	u32 error_code = svm->vmcb->control.exit_info_1;

	WARN_ON_ONCE(!enable_vmware_backdoor);

	/*
	 * VMware backdoor emulation on #GP interception only handles IN{S},
	 * OUT{S}, and RDPMC, none of which generate a non-zero error code.
	 */
	if (error_code) {
		kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
		return 1;
	}
	return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP);
}

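/*
 * AMD erratum 383 shows up as a machine check with a characteristic
 * MC0_STATUS signature (0xb600000000010015, ignoring bit 62).  When the
 * signature matches, the MCE state is cleared here and the caller kills
 * the guest instead of letting the host MCE handler see corrupt state.
 */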
static bool is_erratum_383(void)
{
	int err, i;
	u64 value;

	if (!erratum_383_found)
		return false;

	value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
	if (err)
		return false;

	/* Bit 62 may or may not be set for this mce */
	value &= ~(1ULL << 62);

	if (value != 0xb600000000010015ULL)
		return false;

	/* Clear MCi_STATUS registers */
	for (i = 0; i < 6; ++i)
		native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);

	value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
	if (!err) {
		u32 low, high;

		value &= ~(1ULL << 2);
		low    = lower_32_bits(value);
		high   = upper_32_bits(value);

		native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
	}

	/* Flush tlb to evict multi-match entries */
	__flush_tlb_all();

	return true;
}

/*
 * Trigger machine check on the host. We assume all the MSRs are already set up
 * by the CPU and that we still run on the same CPU as the MCE occurred on.
 * We pass a fake environment to the machine check handler because we want
 * the guest to be always treated like user space, no matter what context
 * it used internally.
 */
static void kvm_machine_check(void)
{
#if defined(CONFIG_X86_MCE)
	struct pt_regs regs = {
		.cs = 3, /* Fake ring 3 no matter what the guest ran on */
		.flags = X86_EFLAGS_IF,
	};

	do_machine_check(&regs, 0);
#endif
}

static void svm_handle_mce(struct vcpu_svm *svm)
{
	if (is_erratum_383()) {
		/*
		 * Erratum 383 triggered. Guest state is corrupt so kill the
		 * guest.
		 */
		pr_err("KVM: Guest triggered AMD Erratum 383\n");

		kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);

		return;
	}

	/*
	 * On an #MC intercept the MCE handler is not called automatically in
	 * the host. So do it by hand here.
	 */
	kvm_machine_check();
}

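/*
 * The #MC intercept needs no handling here: the real work is done by
 * svm_handle_mce(), called from the vcpu_run path while we are still on
 * the physical CPU that took the machine check.
 */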
static int mc_interception(struct vcpu_svm *svm)
{
	return 1;
}

static int shutdown_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;

	/*
	 * VMCB is undefined after a SHUTDOWN intercept
	 * so reinitialize it.
	 */
	clear_page(svm->vmcb);
	init_vmcb(svm);

	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}

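/*
 * For IN/OUT intercepts the CPU pre-decodes the instruction into
 * exit_info_1: SVM_IOIO_TYPE_MASK gives the direction, SVM_IOIO_STR_MASK
 * flags string I/O, SVM_IOIO_SIZE_MASK the operand size, and bits 31:16
 * the port number.  exit_info_2 holds the next RIP, so non-string I/O
 * completes without running the full instruction emulator.
 */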
static int io_interception(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
	int size, in, string;
	unsigned port;

	++svm->vcpu.stat.io_exits;
	string = (io_info & SVM_IOIO_STR_MASK) != 0;
	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
	if (string)
		return kvm_emulate_instruction(vcpu, 0);

	port = io_info >> 16;
	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
	svm->next_rip = svm->vmcb->control.exit_info_2;

	return kvm_fast_pio(&svm->vcpu, size, port, in);
}

static int nmi_interception(struct vcpu_svm *svm)
{
	return 1;
}

static int intr_interception(struct vcpu_svm *svm)
{
	++svm->vcpu.stat.irq_exits;
	return 1;
}

static int nop_on_interception(struct vcpu_svm *svm)
{
	return 1;
}

static int halt_interception(struct vcpu_svm *svm)
{
	return kvm_emulate_halt(&svm->vcpu);
}

static int vmmcall_interception(struct vcpu_svm *svm)
{
	return kvm_emulate_hypercall(&svm->vcpu);
}

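/*
 * VMLOAD/VMSAVE emulation: map the guest-provided VMCB page (physical
 * address in RAX) and copy the affected state between it and the current
 * VMCB via nested_svm_vmloadsave() (per the APM this covers FS/GS/TR/LDTR,
 * KernelGsBase, STAR/LSTAR/CSTAR/SFMASK and the SYSENTER MSRs).
 */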
static int vmload_interception(struct vcpu_svm *svm)
{
	struct vmcb *nested_vmcb;
	struct kvm_host_map map;
	int ret;

	if (nested_svm_check_permissions(svm))
		return 1;

	ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
	if (ret) {
		if (ret == -EINVAL)
			kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	nested_vmcb = map.hva;

	ret = kvm_skip_emulated_instruction(&svm->vcpu);

	nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
	kvm_vcpu_unmap(&svm->vcpu, &map, true);

	return ret;
}

static int vmsave_interception(struct vcpu_svm *svm)
{
	struct vmcb *nested_vmcb;
	struct kvm_host_map map;
	int ret;

	if (nested_svm_check_permissions(svm))
		return 1;

	ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
	if (ret) {
		if (ret == -EINVAL)
			kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	nested_vmcb = map.hva;

	ret = kvm_skip_emulated_instruction(&svm->vcpu);

	nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
	kvm_vcpu_unmap(&svm->vcpu, &map, true);

	return ret;
}

static int vmrun_interception(struct vcpu_svm *svm)
{
	if (nested_svm_check_permissions(svm))
		return 1;

	return nested_svm_vmrun(svm);
}

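/*
 * STGI and CLGI toggle the Global Interrupt Flag on behalf of an L1
 * hypervisor.  With vGIF the hardware tracks GIF itself and the STGI
 * intercept is only armed while waiting for an SMI/NMI window, hence it
 * is torn down again below.
 */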
static int stgi_interception(struct vcpu_svm *svm)
{
	int ret;

	if (nested_svm_check_permissions(svm))
		return 1;

	/*
	 * If VGIF is enabled, the STGI intercept is only added to
	 * detect the opening of the SMI/NMI window; remove it now.
	 */
	if (vgif_enabled(svm))
		clr_intercept(svm, INTERCEPT_STGI);

	ret = kvm_skip_emulated_instruction(&svm->vcpu);
	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);

	enable_gif(svm);

	return ret;
}

static int clgi_interception(struct vcpu_svm *svm)
{
	int ret;

	if (nested_svm_check_permissions(svm))
		return 1;

	ret = kvm_skip_emulated_instruction(&svm->vcpu);

	disable_gif(svm);

	/* After a CLGI no interrupts should be delivered */
	if (!kvm_vcpu_apicv_active(&svm->vcpu))
		svm_clear_vintr(svm);

	return ret;
}

static int invlpga_interception(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;

	trace_kvm_invlpga(svm->vmcb->save.rip, kvm_rcx_read(&svm->vcpu),
			  kvm_rax_read(&svm->vcpu));

	/* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
	kvm_mmu_invlpg(vcpu, kvm_rax_read(&svm->vcpu));

	return kvm_skip_emulated_instruction(&svm->vcpu);
}

static int skinit_interception(struct vcpu_svm *svm)
{
	trace_kvm_skinit(svm->vmcb->save.rip, kvm_rax_read(&svm->vcpu));

	kvm_queue_exception(&svm->vcpu, UD_VECTOR);
	return 1;
}

static int wbinvd_interception(struct vcpu_svm *svm)
{
	return kvm_emulate_wbinvd(&svm->vcpu);
}

static int xsetbv_interception(struct vcpu_svm *svm)
{
	u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
	u32 index = kvm_rcx_read(&svm->vcpu);

	if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
		return kvm_skip_emulated_instruction(&svm->vcpu);
	}

	return 1;
}

static int rdpru_interception(struct vcpu_svm *svm)
{
	kvm_queue_exception(&svm->vcpu, UD_VECTOR);
	return 1;
}

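/*
 * Hardware task switches cannot run under SVM and must be emulated.
 * exit_info_1 carries the target TSS selector, exit_info_2 encodes the
 * reason (IRET or far JMP) plus an optional error code, and exit_int_info
 * describes an event that was being delivered through the IDT when the
 * task switch triggered and may need to be replayed.
 */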
static int task_switch_interception(struct vcpu_svm *svm)
{
	u16 tss_selector;
	int reason;
	int int_type = svm->vmcb->control.exit_int_info &
		SVM_EXITINTINFO_TYPE_MASK;
	int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
	uint32_t type =
		svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
	uint32_t idt_v =
		svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
	bool has_error_code = false;
	u32 error_code = 0;

	tss_selector = (u16)svm->vmcb->control.exit_info_1;

	if (svm->vmcb->control.exit_info_2 &
	    (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
		reason = TASK_SWITCH_IRET;
	else if (svm->vmcb->control.exit_info_2 &
		 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
		reason = TASK_SWITCH_JMP;
	else if (idt_v)
		reason = TASK_SWITCH_GATE;
	else
		reason = TASK_SWITCH_CALL;

	if (reason == TASK_SWITCH_GATE) {
		switch (type) {
		case SVM_EXITINTINFO_TYPE_NMI:
			svm->vcpu.arch.nmi_injected = false;
			break;
		case SVM_EXITINTINFO_TYPE_EXEPT:
			if (svm->vmcb->control.exit_info_2 &
			    (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
				has_error_code = true;
				error_code =
					(u32)svm->vmcb->control.exit_info_2;
			}
			kvm_clear_exception_queue(&svm->vcpu);
			break;
		case SVM_EXITINTINFO_TYPE_INTR:
			kvm_clear_interrupt_queue(&svm->vcpu);
			break;
		default:
			break;
		}
	}

	if (reason != TASK_SWITCH_GATE ||
	    int_type == SVM_EXITINTINFO_TYPE_SOFT ||
	    (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
	     (int_vec == OF_VECTOR || int_vec == BP_VECTOR))) {
		if (!skip_emulated_instruction(&svm->vcpu))
			return 0;
	}

	if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
		int_vec = -1;

	return kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason,
			       has_error_code, error_code);
}

static int cpuid_interception(struct vcpu_svm *svm)
{
	return kvm_emulate_cpuid(&svm->vcpu);
}

static int iret_interception(struct vcpu_svm *svm)
{
	++svm->vcpu.stat.nmi_window_exits;
	clr_intercept(svm, INTERCEPT_IRET);
	svm->vcpu.arch.hflags |= HF_IRET_MASK;
	svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
	return 1;
}

static int invlpg_interception(struct vcpu_svm *svm)
{
	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
		return kvm_emulate_instruction(&svm->vcpu, 0);

	kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
	return kvm_skip_emulated_instruction(&svm->vcpu);
}

static int emulate_on_interception(struct vcpu_svm *svm)
{
	return kvm_emulate_instruction(&svm->vcpu, 0);
}

static int rsm_interception(struct vcpu_svm *svm)
{
	return kvm_emulate_instruction_from_buffer(&svm->vcpu, rsm_ins_bytes, 2);
}

static int rdpmc_interception(struct vcpu_svm *svm)
{
	int err;

	if (!nrips)
		return emulate_on_interception(svm);

	err = kvm_rdpmc(&svm->vcpu);
	return kvm_complete_insn_gp(&svm->vcpu, err);
}

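/*
 * With a nested guest, a CR0 write needs to be reflected to L1 only if it
 * changes bits outside SVM_CR0_SELECTIVE_MASK and L1 asked for the
 * selective CR0 intercept; this helper synthesizes the CR0_SEL_WRITE exit
 * in that case.
 */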
static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
					    unsigned long val)
{
	unsigned long cr0 = svm->vcpu.arch.cr0;
	bool ret = false;
	u64 intercept;

	intercept = svm->nested.intercept;

	if (!is_guest_mode(&svm->vcpu) ||
	    (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0))))
		return false;

	cr0 &= ~SVM_CR0_SELECTIVE_MASK;
	val &= ~SVM_CR0_SELECTIVE_MASK;

	if (cr0 ^ val) {
		svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
		ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
	}

	return ret;
}

#define CR_VALID (1ULL << 63)

static int cr_interception(struct vcpu_svm *svm)
{
	int reg, cr;
	unsigned long val;
	int err;

	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
		return emulate_on_interception(svm);

	if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
		return emulate_on_interception(svm);

	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
	if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
		cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;
	else
		cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;

	err = 0;
	if (cr >= 16) { /* mov to cr */
		cr -= 16;
		val = kvm_register_read(&svm->vcpu, reg);
		switch (cr) {
		case 0:
			if (!check_selective_cr0_intercepted(svm, val))
				err = kvm_set_cr0(&svm->vcpu, val);
			else
				return 1;

			break;
		case 3:
			err = kvm_set_cr3(&svm->vcpu, val);
			break;
		case 4:
			err = kvm_set_cr4(&svm->vcpu, val);
			break;
		case 8:
			err = kvm_set_cr8(&svm->vcpu, val);
			break;
		default:
			WARN(1, "unhandled write to CR%d", cr);
			kvm_queue_exception(&svm->vcpu, UD_VECTOR);
			return 1;
		}
	} else { /* mov from cr */
		switch (cr) {
		case 0:
			val = kvm_read_cr0(&svm->vcpu);
			break;
		case 2:
			val = svm->vcpu.arch.cr2;
			break;
		case 3:
			val = kvm_read_cr3(&svm->vcpu);
			break;
		case 4:
			val = kvm_read_cr4(&svm->vcpu);
			break;
		case 8:
			val = kvm_get_cr8(&svm->vcpu);
			break;
		default:
			WARN(1, "unhandled read from CR%d", cr);
			kvm_queue_exception(&svm->vcpu, UD_VECTOR);
			return 1;
		}
		kvm_register_write(&svm->vcpu, reg, val);
	}
	return kvm_complete_insn_gp(&svm->vcpu, err);
}

static int dr_interception(struct vcpu_svm *svm)
{
	int reg, dr;
	unsigned long val;

	if (svm->vcpu.guest_debug == 0) {
		/*
		 * No more DR vmexits; force a reload of the debug registers
		 * and reenter on this instruction.  The next vmexit will
		 * retrieve the full state of the debug registers.
		 */
		clr_dr_intercepts(svm);
		svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
		return 1;
	}

	if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
		return emulate_on_interception(svm);

	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
	dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;

	if (dr >= 16) { /* mov to DRn */
		if (!kvm_require_dr(&svm->vcpu, dr - 16))
			return 1;
		val = kvm_register_read(&svm->vcpu, reg);
		kvm_set_dr(&svm->vcpu, dr - 16, val);
	} else {
		if (!kvm_require_dr(&svm->vcpu, dr))
			return 1;
		kvm_get_dr(&svm->vcpu, dr, &val);
		kvm_register_write(&svm->vcpu, reg, val);
	}

	return kvm_skip_emulated_instruction(&svm->vcpu);
}

static int cr8_write_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;
	int r;

	u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
	/* instruction emulation calls kvm_set_cr8() */
	r = cr_interception(svm);
	if (lapic_in_kernel(&svm->vcpu))
		return r;
	if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
		return r;
	kvm_run->exit_reason = KVM_EXIT_SET_TPR;
	return 0;
}

static int svm_get_msr_feature(struct kvm_msr_entry *msr)
{
	msr->data = 0;

	switch (msr->index) {
	case MSR_F10H_DECFG:
		if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
			msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE;
		break;
	default:
		return 1;
	}

	return 0;
}

static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	switch (msr_info->index) {
	case MSR_STAR:
		msr_info->data = svm->vmcb->save.star;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		msr_info->data = svm->vmcb->save.lstar;
		break;
	case MSR_CSTAR:
		msr_info->data = svm->vmcb->save.cstar;
		break;
	case MSR_KERNEL_GS_BASE:
		msr_info->data = svm->vmcb->save.kernel_gs_base;
		break;
	case MSR_SYSCALL_MASK:
		msr_info->data = svm->vmcb->save.sfmask;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		msr_info->data = svm->vmcb->save.sysenter_cs;
		break;
	case MSR_IA32_SYSENTER_EIP:
		msr_info->data = svm->sysenter_eip;
		break;
	case MSR_IA32_SYSENTER_ESP:
		msr_info->data = svm->sysenter_esp;
		break;
	case MSR_TSC_AUX:
		if (!boot_cpu_has(X86_FEATURE_RDTSCP))
			return 1;
		msr_info->data = svm->tsc_aux;
		break;
	/*
	 * Nobody will change the following 5 values in the VMCB so we can
	 * safely return them on rdmsr. They will always be 0 until LBRV is
	 * implemented.
	 */
	case MSR_IA32_DEBUGCTLMSR:
		msr_info->data = svm->vmcb->save.dbgctl;
		break;
	case MSR_IA32_LASTBRANCHFROMIP:
		msr_info->data = svm->vmcb->save.br_from;
		break;
	case MSR_IA32_LASTBRANCHTOIP:
		msr_info->data = svm->vmcb->save.br_to;
		break;
	case MSR_IA32_LASTINTFROMIP:
		msr_info->data = svm->vmcb->save.last_excp_from;
		break;
	case MSR_IA32_LASTINTTOIP:
		msr_info->data = svm->vmcb->save.last_excp_to;
		break;
	case MSR_VM_HSAVE_PA:
		msr_info->data = svm->nested.hsave_msr;
		break;
	case MSR_VM_CR:
		msr_info->data = svm->nested.vm_cr_msr;
		break;
	case MSR_IA32_SPEC_CTRL:
		if (!msr_info->host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
			return 1;

		msr_info->data = svm->spec_ctrl;
		break;
	case MSR_AMD64_VIRT_SPEC_CTRL:
		if (!msr_info->host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
			return 1;

		msr_info->data = svm->virt_spec_ctrl;
		break;
	case MSR_F15H_IC_CFG: {

		int family, model;

		family = guest_cpuid_family(vcpu);
		model  = guest_cpuid_model(vcpu);

		if (family < 0 || model < 0)
			return kvm_get_msr_common(vcpu, msr_info);

		msr_info->data = 0;

		if (family == 0x15 &&
		    (model >= 0x2 && model < 0x20))
			msr_info->data = 0x1E;
		}
		break;
	case MSR_F10H_DECFG:
		msr_info->data = svm->msr_decfg;
		break;
	default:
		return kvm_get_msr_common(vcpu, msr_info);
	}
	return 0;
}

static int rdmsr_interception(struct vcpu_svm *svm)
{
	return kvm_emulate_rdmsr(&svm->vcpu);
}

static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int svm_dis, chg_mask;

	if (data & ~SVM_VM_CR_VALID_MASK)
		return 1;

	chg_mask = SVM_VM_CR_VALID_MASK;

	if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
		chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);

	svm->nested.vm_cr_msr &= ~chg_mask;
	svm->nested.vm_cr_msr |= (data & chg_mask);

	svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;

	/* check for svm_disable while efer.svme is set */
	if (svm_dis && (vcpu->arch.efer & EFER_SVME))
		return 1;

	return 0;
}

static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	u32 ecx = msr->index;
	u64 data = msr->data;
	switch (ecx) {
	case MSR_IA32_CR_PAT:
		if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
			return 1;
		vcpu->arch.pat = data;
		svm->vmcb->save.g_pat = data;
		mark_dirty(svm->vmcb, VMCB_NPT);
		break;
	case MSR_IA32_SPEC_CTRL:
		if (!msr->host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
			return 1;

		if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
			return 1;

		svm->spec_ctrl = data;
		if (!data)
			break;

		/*
		 * For non-nested:
		 * When it's written (to non-zero) for the first time, pass
		 * it through.
		 *
		 * For nested:
		 * The handling of the MSR bitmap for L2 guests is done in
		 * nested_svm_vmrun_msrpm.
		 * We update the L1 MSR bit as well since it will end up
		 * touching the MSR anyway now.
		 */
		set_msr_interception(svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
		break;
	case MSR_IA32_PRED_CMD:
		if (!msr->host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB))
			return 1;

		if (data & ~PRED_CMD_IBPB)
			return 1;
		if (!boot_cpu_has(X86_FEATURE_AMD_IBPB))
			return 1;
		if (!data)
			break;

		wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
		set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
		break;
	case MSR_AMD64_VIRT_SPEC_CTRL:
		if (!msr->host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
			return 1;

		if (data & ~SPEC_CTRL_SSBD)
			return 1;

		svm->virt_spec_ctrl = data;
		break;
	case MSR_STAR:
		svm->vmcb->save.star = data;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		svm->vmcb->save.lstar = data;
		break;
	case MSR_CSTAR:
		svm->vmcb->save.cstar = data;
		break;
	case MSR_KERNEL_GS_BASE:
		svm->vmcb->save.kernel_gs_base = data;
		break;
	case MSR_SYSCALL_MASK:
		svm->vmcb->save.sfmask = data;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		svm->vmcb->save.sysenter_cs = data;
		break;
	case MSR_IA32_SYSENTER_EIP:
		svm->sysenter_eip = data;
		svm->vmcb->save.sysenter_eip = data;
		break;
	case MSR_IA32_SYSENTER_ESP:
		svm->sysenter_esp = data;
		svm->vmcb->save.sysenter_esp = data;
		break;
	case MSR_TSC_AUX:
		if (!boot_cpu_has(X86_FEATURE_RDTSCP))
			return 1;

		/*
		 * This is rare, so we update the MSR here instead of using
		 * direct_access_msrs.  Doing that would require a rdmsr in
		 * svm_vcpu_put.
		 */
		svm->tsc_aux = data;
		wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
		break;
	case MSR_IA32_DEBUGCTLMSR:
		if (!boot_cpu_has(X86_FEATURE_LBRV)) {
			vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
				    __func__, data);
			break;
		}
		if (data & DEBUGCTL_RESERVED_BITS)
			return 1;

		svm->vmcb->save.dbgctl = data;
		mark_dirty(svm->vmcb, VMCB_LBR);
		if (data & (1ULL<<0))
			svm_enable_lbrv(svm);
		else
			svm_disable_lbrv(svm);
		break;
	case MSR_VM_HSAVE_PA:
		svm->nested.hsave_msr = data;
		break;
	case MSR_VM_CR:
		return svm_set_vm_cr(vcpu, data);
	case MSR_VM_IGNNE:
		vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
		break;
	case MSR_F10H_DECFG: {
		struct kvm_msr_entry msr_entry;

		msr_entry.index = msr->index;
		if (svm_get_msr_feature(&msr_entry))
			return 1;

		/* Check the supported bits */
		if (data & ~msr_entry.data)
			return 1;

		/* Don't allow the guest to change a bit, #GP */
		if (!msr->host_initiated && (data ^ msr_entry.data))
			return 1;

		svm->msr_decfg = data;
		break;
	}
	case MSR_IA32_APICBASE:
		if (kvm_vcpu_apicv_active(vcpu))
			avic_update_vapic_bar(to_svm(vcpu), data);
		/* Fall through */
	default:
		return kvm_set_msr_common(vcpu, msr);
	}
	return 0;
}

static int wrmsr_interception(struct vcpu_svm *svm)
{
	return kvm_emulate_wrmsr(&svm->vcpu);
}

static int msr_interception(struct vcpu_svm *svm)
{
	if (svm->vmcb->control.exit_info_1)
		return wrmsr_interception(svm);
	else
		return rdmsr_interception(svm);
}

static int interrupt_window_interception(struct vcpu_svm *svm)
{
	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
	svm_clear_vintr(svm);

	/*
	 * For AVIC, the only reason to end up here is ExtINTs.
	 * In this case AVIC was temporarily disabled for
	 * requesting the IRQ window and we have to re-enable it.
	 */
	svm_toggle_avic_for_irq_window(&svm->vcpu, true);

	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
	mark_dirty(svm->vmcb, VMCB_INTR);
	++svm->vcpu.stat.irq_window_exits;
	return 1;
}

static int pause_interception(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	bool in_kernel = (svm_get_cpl(vcpu) == 0);

	if (pause_filter_thresh)
		grow_ple_window(vcpu);

	kvm_vcpu_on_spin(vcpu, in_kernel);
	return 1;
}

static int nop_interception(struct vcpu_svm *svm)
{
	return kvm_skip_emulated_instruction(&(svm->vcpu));
}

static int monitor_interception(struct vcpu_svm *svm)
{
	printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
	return nop_interception(svm);
}

static int mwait_interception(struct vcpu_svm *svm)
{
	printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
	return nop_interception(svm);
}

static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
	[SVM_EXIT_READ_CR0]			= cr_interception,
	[SVM_EXIT_READ_CR3]			= cr_interception,
	[SVM_EXIT_READ_CR4]			= cr_interception,
	[SVM_EXIT_READ_CR8]			= cr_interception,
	[SVM_EXIT_CR0_SEL_WRITE]		= cr_interception,
	[SVM_EXIT_WRITE_CR0]			= cr_interception,
	[SVM_EXIT_WRITE_CR3]			= cr_interception,
	[SVM_EXIT_WRITE_CR4]			= cr_interception,
	[SVM_EXIT_WRITE_CR8]			= cr8_write_interception,
	[SVM_EXIT_READ_DR0]			= dr_interception,
	[SVM_EXIT_READ_DR1]			= dr_interception,
	[SVM_EXIT_READ_DR2]			= dr_interception,
	[SVM_EXIT_READ_DR3]			= dr_interception,
	[SVM_EXIT_READ_DR4]			= dr_interception,
	[SVM_EXIT_READ_DR5]			= dr_interception,
	[SVM_EXIT_READ_DR6]			= dr_interception,
	[SVM_EXIT_READ_DR7]			= dr_interception,
	[SVM_EXIT_WRITE_DR0]			= dr_interception,
	[SVM_EXIT_WRITE_DR1]			= dr_interception,
	[SVM_EXIT_WRITE_DR2]			= dr_interception,
	[SVM_EXIT_WRITE_DR3]			= dr_interception,
	[SVM_EXIT_WRITE_DR4]			= dr_interception,
	[SVM_EXIT_WRITE_DR5]			= dr_interception,
	[SVM_EXIT_WRITE_DR6]			= dr_interception,
	[SVM_EXIT_WRITE_DR7]			= dr_interception,
	[SVM_EXIT_EXCP_BASE + DB_VECTOR]	= db_interception,
	[SVM_EXIT_EXCP_BASE + BP_VECTOR]	= bp_interception,
	[SVM_EXIT_EXCP_BASE + UD_VECTOR]	= ud_interception,
	[SVM_EXIT_EXCP_BASE + PF_VECTOR]	= pf_interception,
	[SVM_EXIT_EXCP_BASE + MC_VECTOR]	= mc_interception,
	[SVM_EXIT_EXCP_BASE + AC_VECTOR]	= ac_interception,
	[SVM_EXIT_EXCP_BASE + GP_VECTOR]	= gp_interception,
	[SVM_EXIT_INTR]				= intr_interception,
	[SVM_EXIT_NMI]				= nmi_interception,
	[SVM_EXIT_SMI]				= nop_on_interception,
	[SVM_EXIT_INIT]				= nop_on_interception,
	[SVM_EXIT_VINTR]			= interrupt_window_interception,
	[SVM_EXIT_RDPMC]			= rdpmc_interception,
	[SVM_EXIT_CPUID]			= cpuid_interception,
	[SVM_EXIT_IRET]                         = iret_interception,
	[SVM_EXIT_INVD]                         = emulate_on_interception,
	[SVM_EXIT_PAUSE]			= pause_interception,
	[SVM_EXIT_HLT]				= halt_interception,
	[SVM_EXIT_INVLPG]			= invlpg_interception,
	[SVM_EXIT_INVLPGA]			= invlpga_interception,
	[SVM_EXIT_IOIO]				= io_interception,
	[SVM_EXIT_MSR]				= msr_interception,
	[SVM_EXIT_TASK_SWITCH]			= task_switch_interception,
	[SVM_EXIT_SHUTDOWN]			= shutdown_interception,
	[SVM_EXIT_VMRUN]			= vmrun_interception,
	[SVM_EXIT_VMMCALL]			= vmmcall_interception,
	[SVM_EXIT_VMLOAD]			= vmload_interception,
	[SVM_EXIT_VMSAVE]			= vmsave_interception,
	[SVM_EXIT_STGI]				= stgi_interception,
	[SVM_EXIT_CLGI]				= clgi_interception,
	[SVM_EXIT_SKINIT]			= skinit_interception,
	[SVM_EXIT_WBINVD]                       = wbinvd_interception,
	[SVM_EXIT_MONITOR]			= monitor_interception,
	[SVM_EXIT_MWAIT]			= mwait_interception,
	[SVM_EXIT_XSETBV]			= xsetbv_interception,
	[SVM_EXIT_RDPRU]			= rdpru_interception,
	[SVM_EXIT_NPF]				= npf_interception,
	[SVM_EXIT_RSM]                          = rsm_interception,
	[SVM_EXIT_AVIC_INCOMPLETE_IPI]		= avic_incomplete_ipi_interception,
	[SVM_EXIT_AVIC_UNACCELERATED_ACCESS]	= avic_unaccelerated_access_interception,
};

static void dump_vmcb(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct vmcb_save_area *save = &svm->vmcb->save;

	if (!dump_invalid_vmcb) {
		pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
		return;
	}

	pr_err("VMCB Control Area:\n");
	pr_err("%-20s%04x\n", "cr_read:", control->intercept_cr & 0xffff);
	pr_err("%-20s%04x\n", "cr_write:", control->intercept_cr >> 16);
	pr_err("%-20s%04x\n", "dr_read:", control->intercept_dr & 0xffff);
	pr_err("%-20s%04x\n", "dr_write:", control->intercept_dr >> 16);
	pr_err("%-20s%08x\n", "exceptions:", control->intercept_exceptions);
	pr_err("%-20s%016llx\n", "intercepts:", control->intercept);
	pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
	pr_err("%-20s%d\n", "pause filter threshold:",
	       control->pause_filter_thresh);
	pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
	pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
	pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
	pr_err("%-20s%d\n", "asid:", control->asid);
	pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
	pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
	pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
	pr_err("%-20s%08x\n", "int_state:", control->int_state);
	pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
	pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
	pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
	pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
	pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
	pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
	pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
	pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar);
	pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
	pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
	pr_err("%-20s%lld\n", "virt_ext:", control->virt_ext);
	pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
	pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page);
	pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
	pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id);
	pr_err("VMCB State Save Area:\n");
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "es:",
	       save->es.selector, save->es.attrib,
	       save->es.limit, save->es.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "cs:",
	       save->cs.selector, save->cs.attrib,
	       save->cs.limit, save->cs.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "ss:",
	       save->ss.selector, save->ss.attrib,
	       save->ss.limit, save->ss.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "ds:",
	       save->ds.selector, save->ds.attrib,
	       save->ds.limit, save->ds.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "fs:",
	       save->fs.selector, save->fs.attrib,
	       save->fs.limit, save->fs.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "gs:",
	       save->gs.selector, save->gs.attrib,
	       save->gs.limit, save->gs.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "gdtr:",
	       save->gdtr.selector, save->gdtr.attrib,
	       save->gdtr.limit, save->gdtr.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "ldtr:",
	       save->ldtr.selector, save->ldtr.attrib,
	       save->ldtr.limit, save->ldtr.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "idtr:",
	       save->idtr.selector, save->idtr.attrib,
	       save->idtr.limit, save->idtr.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "tr:",
	       save->tr.selector, save->tr.attrib,
	       save->tr.limit, save->tr.base);
	pr_err("cpl:            %d                efer:         %016llx\n",
		save->cpl, save->efer);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "cr0:", save->cr0, "cr2:", save->cr2);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "cr3:", save->cr3, "cr4:", save->cr4);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "dr6:", save->dr6, "dr7:", save->dr7);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "rip:", save->rip, "rflags:", save->rflags);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "rsp:", save->rsp, "rax:", save->rax);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "star:", save->star, "lstar:", save->lstar);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "cstar:", save->cstar, "sfmask:", save->sfmask);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "kernel_gs_base:", save->kernel_gs_base,
	       "sysenter_cs:", save->sysenter_cs);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "sysenter_esp:", save->sysenter_esp,
	       "sysenter_eip:", save->sysenter_eip);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "br_from:", save->br_from, "br_to:", save->br_to);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "excp_from:", save->last_excp_from,
	       "excp_to:", save->last_excp_to);
}

static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
{
	struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;

	*info1 = control->exit_info_1;
	*info2 = control->exit_info_2;
}

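/*
 * Top-level exit dispatch: sync CR0/CR3 back from the VMCB, give a nested
 * (L1) hypervisor the chance to claim the exit, report SVM_EXIT_ERR as a
 * failed entry, then hand the exit to svm_exit_handlers[].  Under
 * CONFIG_RETPOLINE the hottest exit reasons are dispatched via direct
 * calls instead of the function-pointer table.
 */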
static int handle_exit(struct kvm_vcpu *vcpu,
	enum exit_fastpath_completion exit_fastpath)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct kvm_run *kvm_run = vcpu->run;
	u32 exit_code = svm->vmcb->control.exit_code;

	trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);

	if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
		vcpu->arch.cr0 = svm->vmcb->save.cr0;
	if (npt_enabled)
		vcpu->arch.cr3 = svm->vmcb->save.cr3;

	if (unlikely(svm->nested.exit_required)) {
		nested_svm_vmexit(svm);
		svm->nested.exit_required = false;

		return 1;
	}

	if (is_guest_mode(vcpu)) {
		int vmexit;

		trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
					svm->vmcb->control.exit_info_1,
					svm->vmcb->control.exit_info_2,
					svm->vmcb->control.exit_int_info,
					svm->vmcb->control.exit_int_info_err,
					KVM_ISA_SVM);

		vmexit = nested_svm_exit_special(svm);

		if (vmexit == NESTED_EXIT_CONTINUE)
			vmexit = nested_svm_exit_handled(svm);

		if (vmexit == NESTED_EXIT_DONE)
			return 1;
	}

	svm_complete_interrupts(svm);

	if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		kvm_run->fail_entry.hardware_entry_failure_reason
			= svm->vmcb->control.exit_code;
		dump_vmcb(vcpu);
		return 0;
	}

	if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
	    exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
	    exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
	    exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
		printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
		       "exit_code 0x%x\n",
		       __func__, svm->vmcb->control.exit_int_info,
		       exit_code);

	if (exit_fastpath == EXIT_FASTPATH_SKIP_EMUL_INS) {
		kvm_skip_emulated_instruction(vcpu);
		return 1;
	} else if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
	    || !svm_exit_handlers[exit_code]) {
		vcpu_unimpl(vcpu, "svm: unexpected exit reason 0x%x\n", exit_code);
		dump_vmcb(vcpu);
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror =
			KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
		vcpu->run->internal.ndata = 1;
		vcpu->run->internal.data[0] = exit_code;
		return 0;
	}

#ifdef CONFIG_RETPOLINE
	if (exit_code == SVM_EXIT_MSR)
		return msr_interception(svm);
	else if (exit_code == SVM_EXIT_VINTR)
		return interrupt_window_interception(svm);
	else if (exit_code == SVM_EXIT_INTR)
		return intr_interception(svm);
	else if (exit_code == SVM_EXIT_HLT)
		return halt_interception(svm);
	else if (exit_code == SVM_EXIT_NPF)
		return npf_interception(svm);
#endif
	return svm_exit_handlers[exit_code](svm);
}

static void reload_tss(struct kvm_vcpu *vcpu)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
	sd->tss_desc->type = 9; /* available 32/64-bit TSS */
	load_TR_desc();
}

static void pre_svm_run(struct vcpu_svm *svm)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);

	if (sev_guest(svm->vcpu.kvm))
		return pre_sev_run(svm, cpu);

	/* FIXME: handle wraparound of asid_generation */
	if (svm->asid_generation != sd->asid_generation)
		new_asid(svm, sd);
}

static void svm_inject_nmi(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
	vcpu->arch.hflags |= HF_NMI_MASK;
	set_intercept(svm, INTERCEPT_IRET);
	++vcpu->stat.nmi_injections;
}

static void svm_set_irq(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	BUG_ON(!(gif_set(svm)));

	trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
	++vcpu->stat.irq_injections;

	svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
		SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
}

static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm_nested_virtualize_tpr(vcpu))
		return;

	clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);

	if (irr == -1)
		return;

	if (tpr >= irr)
		set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
}

bool svm_nmi_blocked(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;
	bool ret;

	if (!gif_set(svm))
		return true;

	if (is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
		return false;

	ret = (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
	      (svm->vcpu.arch.hflags & HF_NMI_MASK);

	return ret;
}

static bool svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	if (svm->nested.nested_run_pending)
		return false;

	/* An NMI must not be injected into L2 if it's supposed to VM-Exit.  */
	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
		return false;

	return !svm_nmi_blocked(vcpu);
}

static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
}

static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (masked) {
		svm->vcpu.arch.hflags |= HF_NMI_MASK;
		set_intercept(svm, INTERCEPT_IRET);
	} else {
		svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
		clr_intercept(svm, INTERCEPT_IRET);
	}
}

bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;

	if (!gif_set(svm))
		return true;

	if (is_guest_mode(vcpu)) {
		/* As long as interrupts are being delivered...  */
		if ((svm->vcpu.arch.hflags & HF_VINTR_MASK)
		    ? !(svm->vcpu.arch.hflags & HF_HIF_MASK)
		    : !(kvm_get_rflags(vcpu) & X86_EFLAGS_IF))
			return true;

		/* ... vmexits aren't blocked by the interrupt shadow  */
		if (nested_exit_on_intr(svm))
			return false;
	} else {
		if (!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF))
			return true;
	}

	return (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK);
}

static bool svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	if (svm->nested.nested_run_pending)
		return false;

	/*
	 * An IRQ must not be injected into L2 if it's supposed to VM-Exit,
	 * e.g. if the IRQ arrived asynchronously after checking nested events.
	 */
	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(svm))
		return false;

	return !svm_interrupt_blocked(vcpu);
}

static void enable_irq_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
	 * 1, because that's a separate STGI/VMRUN intercept.  The next time we
	 * get that intercept, this function will be called again though and
	 * we'll get the vintr intercept. However, if the vGIF feature is
	 * enabled, the STGI interception will not occur. Enable the irq
	 * window under the assumption that the hardware will set the GIF.
	 */
	if (vgif_enabled(svm) || gif_set(svm)) {
		/*
		 * IRQ window is not needed when AVIC is enabled,
		 * unless we have pending ExtINT since it cannot be injected
		 * via AVIC. In such case, we need to temporarily disable AVIC,
		 * and fallback to injecting IRQ via V_IRQ.
		 */
		svm_toggle_avic_for_irq_window(vcpu, false);
		svm_set_vintr(svm);
	}
}

static void enable_nmi_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
	    == HF_NMI_MASK)
		return; /* IRET will cause a vm exit */

	if (!gif_set(svm)) {
		if (vgif_enabled(svm))
			set_intercept(svm, INTERCEPT_STGI);
		return; /* STGI will cause a vm exit */
	}

	/*
	 * Something prevents NMI from being injected. Single step over possible
	 * problem (IRET or exception injection or interrupt shadow)
	 */
	svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu);
	svm->nmi_singlestep = true;
	svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
}

static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
	return 0;
}

static int svm_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
{
	return 0;
}

void svm_flush_tlb(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * Flush only the current ASID even if the TLB flush was invoked via
	 * kvm_flush_remote_tlbs().  Although flushing remote TLBs requires all
	 * ASIDs to be flushed, KVM uses a single ASID for L1 and L2, and
	 * unconditionally does a TLB flush on both nested VM-Enter and nested
	 * VM-Exit (via kvm_mmu_reset_context()).
	 */
	if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
	else
		svm->asid_generation--;
}

static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	invlpga(gva, svm->vmcb->control.asid);
}

static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
{
}

static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm_nested_virtualize_tpr(vcpu))
		return;

	if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
		int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
		kvm_set_cr8(vcpu, cr8);
	}
}

static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr8;

	if (svm_nested_virtualize_tpr(vcpu) ||
	    kvm_vcpu_apicv_active(vcpu))
		return;

	cr8 = kvm_get_cr8(vcpu);
	svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
	svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
}

static void svm_complete_interrupts(struct vcpu_svm *svm)
{
	u8 vector;
	int type;
	u32 exitintinfo = svm->vmcb->control.exit_int_info;
	unsigned int3_injected = svm->int3_injected;

	svm->int3_injected = 0;

	/*
	 * If we've made progress since setting HF_IRET_MASK, we've
	 * executed an IRET and can allow NMI injection.
	 */
	if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
	    && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) {
		svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
		kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
	}

	svm->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	if (!(exitintinfo & SVM_EXITINTINFO_VALID))
		return;

	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);

	vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
	type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;

	switch (type) {
	case SVM_EXITINTINFO_TYPE_NMI:
		svm->vcpu.arch.nmi_injected = true;
		break;
	case SVM_EXITINTINFO_TYPE_EXEPT:
		/*
		 * In case of software exceptions, do not reinject the vector,
		 * but re-execute the instruction instead. Rewind RIP first
		 * if we emulated INT3 before.
		 */
		if (kvm_exception_is_soft(vector)) {
			if (vector == BP_VECTOR && int3_injected &&
			    kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
				kvm_rip_write(&svm->vcpu,
					      kvm_rip_read(&svm->vcpu) -
					      int3_injected);
			break;
		}
		if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
			u32 err = svm->vmcb->control.exit_int_info_err;
			kvm_requeue_exception_e(&svm->vcpu, vector, err);

		} else
			kvm_requeue_exception(&svm->vcpu, vector);
		break;
	case SVM_EXITINTINFO_TYPE_INTR:
		kvm_queue_interrupt(&svm->vcpu, vector, false);
		break;
	default:
		break;
	}
}

static void svm_cancel_injection(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;

	control->exit_int_info = control->event_inj;
	control->exit_int_info_err = control->event_inj_err;
	control->event_inj = 0;
	svm_complete_interrupts(svm);
}

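/*
 * Fastpath handling runs with interrupts still disabled, before the full
 * handle_exit() path.  Only WRMSR exits (exit_info_1 != 0 means "write")
 * from a non-nested guest are eligible, matching
 * handle_fastpath_set_msr_irqoff() on the common x86 side.
 */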
static enum exit_fastpath_completion svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
{
	if (!is_guest_mode(vcpu) &&
	    to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
	    to_svm(vcpu)->vmcb->control.exit_info_1)
		return handle_fastpath_set_msr_irqoff(vcpu);

	return EXIT_FASTPATH_NONE;
}

void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);

static enum exit_fastpath_completion svm_vcpu_run(struct kvm_vcpu *vcpu)
A
Avi Kivity 已提交
3348
{
3349
	enum exit_fastpath_completion exit_fastpath;
3350
	struct vcpu_svm *svm = to_svm(vcpu);
3351

3352 3353 3354 3355
	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];

3356 3357 3358 3359 3360
	/*
	 * A vmexit emulation is required before the vcpu can be executed
	 * again.
	 */
	if (unlikely(svm->nested.exit_required))
3361
		return EXIT_FASTPATH_NONE;
3362

3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378
	/*
	 * Disable singlestep if we're injecting an interrupt/exception.
	 * We don't want our modified rflags to be pushed on the stack where
	 * we might not be able to easily reset them if we disabled NMI
	 * singlestep later.
	 */
	if (svm->nmi_singlestep && svm->vmcb->control.event_inj) {
		/*
		 * Event injection happens before external interrupts cause a
		 * vmexit and interrupts are disabled here, so smp_send_reschedule
		 * is enough to force an immediate vmexit.
		 */
		disable_nmi_singlestep(svm);
		smp_send_reschedule(vcpu->cpu);
	}

R
Rusty Russell 已提交
3379
	pre_svm_run(svm);
A
Avi Kivity 已提交
3380

3381 3382
	sync_lapic_to_cr8(vcpu);

3383
	svm->vmcb->save.cr2 = vcpu->arch.cr2;
A
Avi Kivity 已提交
3384

3385 3386 3387 3388 3389 3390 3391 3392 3393
	/*
	 * Run with all-zero DR6 unless needed, so that we can get the exact cause
	 * of a #DB.
	 */
	if (unlikely(svm->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
		svm_set_dr6(svm, vcpu->arch.dr6);
	else
		svm_set_dr6(svm, DR6_FIXED_1 | DR6_RTM);

3394
	clgi();
	kvm_load_guest_xsave_state(vcpu);

	if (lapic_in_kernel(vcpu) &&
		vcpu->arch.apic->lapic_timer.timer_advance_ns)
		kvm_wait_lapic_expire(vcpu);

	/*
	 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
	 * it's non-zero. Since vmentry is serialising on affected CPUs, there
	 * is no need to worry about the conditional branch over the wrmsr
	 * being speculatively taken.
	 */
	x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);

	__svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);

#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
#else
	loadsegment(fs, svm->host.fs);
#ifndef CONFIG_X86_32_LAZY_GS
	loadsegment(gs, svm->host.gs);
#endif
#endif

	/*
	 * We do not use IBRS in the kernel. If this vCPU has used the
	 * SPEC_CTRL MSR it may have left it on; save the value and
	 * turn it off. This is much more efficient than blindly adding
	 * it to the atomic save/restore list. Especially as the former
	 * (Saving guest MSRs on vmexit) doesn't even exist in KVM.
	 *
	 * For non-nested case:
	 * If the L01 MSR bitmap does not intercept the MSR, then we need to
	 * save it.
	 *
	 * For nested case:
	 * If the L02 MSR bitmap does not intercept the MSR, then we need to
	 * save it.
	 */
	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
		svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);

	reload_tss(vcpu);

	x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);

	vcpu->arch.cr2 = svm->vmcb->save.cr2;
	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;

	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
		kvm_before_interrupt(&svm->vcpu);

	kvm_load_host_xsave_state(vcpu);
	stgi();

	/* Any pending NMI will happen here */
	exit_fastpath = svm_exit_handlers_fastpath(vcpu);

	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
		kvm_after_interrupt(&svm->vcpu);

	sync_cr8_to_lapic(vcpu);

	svm->next_rip = 0;
	svm->nested.nested_run_pending = 0;

	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
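	/*
	 * tlb_ctl is consumed by VMRUN and reset here; any TLB flush for
	 * the next entry must be requested again (svm_flush_tlb() sets it
	 * back to a flushing mode).
	 */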

	/* If the exit was due to a #PF, check for a pending async #PF. */
	if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
		svm->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();

	if (npt_enabled) {
		vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
		vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
	}

	/*
	 * We need to handle MC intercepts here before the vcpu has a chance to
	 * change the physical cpu
	 */
	if (unlikely(svm->vmcb->control.exit_code ==
		     SVM_EXIT_EXCP_BASE + MC_VECTOR))
		svm_handle_mce(svm);

	mark_all_clean(svm->vmcb);
	return exit_fastpath;
}

static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long root)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	bool update_guest_cr3 = true;
	unsigned long cr3;

	cr3 = __sme_set(root);
	if (npt_enabled) {
		svm->vmcb->control.nested_cr3 = cr3;
		mark_dirty(svm->vmcb, VMCB_NPT);

		/* Loading L2's CR3 is handled by enter_svm_guest_mode.  */
		if (is_guest_mode(vcpu))
			update_guest_cr3 = false;
		else if (test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
			cr3 = vcpu->arch.cr3;
		else /* CR3 is already up-to-date.  */
			update_guest_cr3 = false;
	}

	if (update_guest_cr3) {
		svm->vmcb->save.cr3 = cr3;
		mark_dirty(svm->vmcb, VMCB_CR);
	}
}

static int is_disabled(void)
{
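	/*
	 * The BIOS can disable (and lock out) SVM via the SVMDIS bit in
	 * the VM_CR MSR; honour that here.
	 */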
	u64 vm_cr;

	rdmsrl(MSR_VM_CR, vm_cr);
	if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
		return 1;

	return 0;
}

static void
svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
	/*
	 * Patch in the VMMCALL instruction:
	 */
	hypercall[0] = 0x0f;
	hypercall[1] = 0x01;
	hypercall[2] = 0xd9;
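	/* 0F 01 D9 is the encoding of VMMCALL */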
}

static int __init svm_check_processor_compat(void)
{
	return 0;
}

static bool svm_cpu_has_accelerated_tpr(void)
{
	return false;
}

static bool svm_has_emulated_msr(int index)
{
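	/*
	 * MCG_EXT_CTL (LMCE) and the VMX capability MSRs are Intel/VMX
	 * features that SVM has no way to emulate.
	 */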
	switch (index) {
	case MSR_IA32_MCG_EXT_CTL:
	case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
		return false;
	default:
		break;
	}

	return true;
}

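/*
 * Unlike EPT, NPT has no memory-type field, so there are no memory-type
 * bits for svm_get_mt_mask() to fold into the page-table entries.
 */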
static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
{
	return 0;
}

static void svm_cpuid_update(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	vcpu->arch.xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
				    boot_cpu_has(X86_FEATURE_XSAVE) &&
				    boot_cpu_has(X86_FEATURE_XSAVES);

	/* Update nrips enabled cache */
	svm->nrips_enabled = kvm_cpu_cap_has(X86_FEATURE_NRIPS) &&
			     guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS);

	if (!kvm_vcpu_apicv_active(vcpu))
		return;

	/*
	 * AVIC does not work with an x2APIC mode guest. If the X2APIC feature
	 * is exposed to the guest, disable AVIC.
	 */
	if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC))
		kvm_request_apicv_update(vcpu->kvm, false,
					 APICV_INHIBIT_REASON_X2APIC);

	/*
	 * Currently, AVIC does not work with nested virtualization.
	 * So, we disable AVIC when cpuid for SVM is set in the L1 guest.
	 */
	if (nested && guest_cpuid_has(vcpu, X86_FEATURE_SVM))
		kvm_request_apicv_update(vcpu->kvm, false,
					 APICV_INHIBIT_REASON_NESTED);
}

static bool svm_has_wbinvd_exit(void)
{
	return true;
}

#define PRE_EX(exit)  { .exit_code = (exit), \
			.stage = X86_ICPT_PRE_EXCEPT, }
#define POST_EX(exit) { .exit_code = (exit), \
			.stage = X86_ICPT_POST_EXCEPT, }
#define POST_MEM(exit) { .exit_code = (exit), \
			.stage = X86_ICPT_POST_MEMACCESS, }

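/*
 * Map each x86 emulator intercept id to the equivalent SVM exit code and
 * the emulation stage at which the intercept check is performed.
 */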
static const struct __x86_intercept {
	u32 exit_code;
	enum x86_intercept_stage stage;
} x86_intercept_map[] = {
	[x86_intercept_cr_read]		= POST_EX(SVM_EXIT_READ_CR0),
	[x86_intercept_cr_write]	= POST_EX(SVM_EXIT_WRITE_CR0),
	[x86_intercept_clts]		= POST_EX(SVM_EXIT_WRITE_CR0),
	[x86_intercept_lmsw]		= POST_EX(SVM_EXIT_WRITE_CR0),
	[x86_intercept_smsw]		= POST_EX(SVM_EXIT_READ_CR0),
	[x86_intercept_dr_read]		= POST_EX(SVM_EXIT_READ_DR0),
	[x86_intercept_dr_write]	= POST_EX(SVM_EXIT_WRITE_DR0),
	[x86_intercept_sldt]		= POST_EX(SVM_EXIT_LDTR_READ),
	[x86_intercept_str]		= POST_EX(SVM_EXIT_TR_READ),
	[x86_intercept_lldt]		= POST_EX(SVM_EXIT_LDTR_WRITE),
	[x86_intercept_ltr]		= POST_EX(SVM_EXIT_TR_WRITE),
	[x86_intercept_sgdt]		= POST_EX(SVM_EXIT_GDTR_READ),
	[x86_intercept_sidt]		= POST_EX(SVM_EXIT_IDTR_READ),
	[x86_intercept_lgdt]		= POST_EX(SVM_EXIT_GDTR_WRITE),
	[x86_intercept_lidt]		= POST_EX(SVM_EXIT_IDTR_WRITE),
	[x86_intercept_vmrun]		= POST_EX(SVM_EXIT_VMRUN),
	[x86_intercept_vmmcall]		= POST_EX(SVM_EXIT_VMMCALL),
	[x86_intercept_vmload]		= POST_EX(SVM_EXIT_VMLOAD),
	[x86_intercept_vmsave]		= POST_EX(SVM_EXIT_VMSAVE),
	[x86_intercept_stgi]		= POST_EX(SVM_EXIT_STGI),
	[x86_intercept_clgi]		= POST_EX(SVM_EXIT_CLGI),
	[x86_intercept_skinit]		= POST_EX(SVM_EXIT_SKINIT),
	[x86_intercept_invlpga]		= POST_EX(SVM_EXIT_INVLPGA),
	[x86_intercept_rdtscp]		= POST_EX(SVM_EXIT_RDTSCP),
	[x86_intercept_monitor]		= POST_MEM(SVM_EXIT_MONITOR),
	[x86_intercept_mwait]		= POST_EX(SVM_EXIT_MWAIT),
	[x86_intercept_invlpg]		= POST_EX(SVM_EXIT_INVLPG),
	[x86_intercept_invd]		= POST_EX(SVM_EXIT_INVD),
	[x86_intercept_wbinvd]		= POST_EX(SVM_EXIT_WBINVD),
	[x86_intercept_wrmsr]		= POST_EX(SVM_EXIT_MSR),
	[x86_intercept_rdtsc]		= POST_EX(SVM_EXIT_RDTSC),
	[x86_intercept_rdmsr]		= POST_EX(SVM_EXIT_MSR),
	[x86_intercept_rdpmc]		= POST_EX(SVM_EXIT_RDPMC),
	[x86_intercept_cpuid]		= PRE_EX(SVM_EXIT_CPUID),
	[x86_intercept_rsm]		= PRE_EX(SVM_EXIT_RSM),
	[x86_intercept_pause]		= PRE_EX(SVM_EXIT_PAUSE),
	[x86_intercept_pushf]		= PRE_EX(SVM_EXIT_PUSHF),
	[x86_intercept_popf]		= PRE_EX(SVM_EXIT_POPF),
	[x86_intercept_intn]		= PRE_EX(SVM_EXIT_SWINT),
	[x86_intercept_iret]		= PRE_EX(SVM_EXIT_IRET),
	[x86_intercept_icebp]		= PRE_EX(SVM_EXIT_ICEBP),
	[x86_intercept_hlt]		= POST_EX(SVM_EXIT_HLT),
	[x86_intercept_in]		= POST_EX(SVM_EXIT_IOIO),
	[x86_intercept_ins]		= POST_EX(SVM_EXIT_IOIO),
	[x86_intercept_out]		= POST_EX(SVM_EXIT_IOIO),
	[x86_intercept_outs]		= POST_EX(SVM_EXIT_IOIO),
	[x86_intercept_xsetbv]		= PRE_EX(SVM_EXIT_XSETBV),
};

#undef PRE_EX
#undef POST_EX
#undef POST_MEM

static int svm_check_intercept(struct kvm_vcpu *vcpu,
			       struct x86_instruction_info *info,
			       enum x86_intercept_stage stage,
			       struct x86_exception *exception)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int vmexit, ret = X86EMUL_CONTINUE;
	struct __x86_intercept icpt_info;
	struct vmcb *vmcb = svm->vmcb;

	if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
		goto out;

	icpt_info = x86_intercept_map[info->intercept];

	if (stage != icpt_info.stage)
		goto out;

	switch (icpt_info.exit_code) {
	case SVM_EXIT_READ_CR0:
		if (info->intercept == x86_intercept_cr_read)
			icpt_info.exit_code += info->modrm_reg;
		break;
	case SVM_EXIT_WRITE_CR0: {
		unsigned long cr0, val;
		u64 intercept;

		if (info->intercept == x86_intercept_cr_write)
			icpt_info.exit_code += info->modrm_reg;

		if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
		    info->intercept == x86_intercept_clts)
			break;

		intercept = svm->nested.intercept;

		if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))
			break;

		cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
		val = info->src_val  & ~SVM_CR0_SELECTIVE_MASK;

		if (info->intercept == x86_intercept_lmsw) {
			cr0 &= 0xfUL;
			val &= 0xfUL;
			/* lmsw can't clear PE - catch this here */
			if (cr0 & X86_CR0_PE)
				val |= X86_CR0_PE;
		}

		if (cr0 ^ val)
			icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;

		break;
	}
	case SVM_EXIT_READ_DR0:
	case SVM_EXIT_WRITE_DR0:
		icpt_info.exit_code += info->modrm_reg;
		break;
	case SVM_EXIT_MSR:
		if (info->intercept == x86_intercept_wrmsr)
			vmcb->control.exit_info_1 = 1;
		else
			vmcb->control.exit_info_1 = 0;
		break;
	case SVM_EXIT_PAUSE:
		/*
		 * PAUSE is encoded as REP NOP, so the emulator reports it
		 * as a NOP; check the REP prefix here to tell the two apart.
		 */
		if (info->rep_prefix != REPE_PREFIX)
			goto out;
		break;
	case SVM_EXIT_IOIO: {
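		/*
		 * Reconstruct exit_info_1 in the IOIO-intercept format:
		 * port number in bits 31:16, type/string/rep/size flags
		 * in the low bits.
		 */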
		u64 exit_info;
		u32 bytes;

		if (info->intercept == x86_intercept_in ||
		    info->intercept == x86_intercept_ins) {
			exit_info = ((info->src_val & 0xffff) << 16) |
				SVM_IOIO_TYPE_MASK;
			bytes = info->dst_bytes;
		} else {
			exit_info = (info->dst_val & 0xffff) << 16;
			bytes = info->src_bytes;
		}

		if (info->intercept == x86_intercept_outs ||
		    info->intercept == x86_intercept_ins)
			exit_info |= SVM_IOIO_STR_MASK;

		if (info->rep_prefix)
			exit_info |= SVM_IOIO_REP_MASK;

		bytes = min(bytes, 4u);

		exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;

		exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);

		vmcb->control.exit_info_1 = exit_info;
		vmcb->control.exit_info_2 = info->next_rip;

		break;
	}
	default:
		break;
	}

	/* TODO: Advertise NRIPS to guest hypervisor unconditionally */
	if (static_cpu_has(X86_FEATURE_NRIPS))
		vmcb->control.next_rip  = info->next_rip;
	vmcb->control.exit_code = icpt_info.exit_code;
	vmexit = nested_svm_exit_handled(svm);

	ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
					   : X86EMUL_CONTINUE;

out:
	return ret;
}

static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
{
}

static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
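	/*
	 * The PLE window grows on PAUSE-filter exits; shrink it again
	 * whenever the vCPU is scheduled back in.
	 */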
	if (pause_filter_thresh)
		shrink_ple_window(vcpu);
}

static void svm_setup_mce(struct kvm_vcpu *vcpu)
{
	/* [63:9] are reserved. */
	vcpu->arch.mcg_cap &= 0x1ff;
}

bool svm_smi_blocked(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/* Per APM Vol.2 15.22.2 "Response to SMI" */
	if (!gif_set(svm))
		return true;

	return is_smm(vcpu);
}

static bool svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->nested.nested_run_pending)
		return false;

	/* An SMI must not be injected into L2 if it's supposed to VM-Exit.  */
	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_smi(svm))
		return false;

	return !svm_smi_blocked(vcpu);
}

static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int ret;

	if (is_guest_mode(vcpu)) {
		/* FED8h - SVM Guest */
		put_smstate(u64, smstate, 0x7ed8, 1);
		/* FEE0h - SVM Guest VMCB Physical Address */
		put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb);

		svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
		svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
		svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];

		ret = nested_svm_vmexit(svm);
		if (ret)
			return ret;
	}
	return 0;
}

static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
{
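	/*
	 * Mirror svm_pre_enter_smm(): recover the nested state stashed in
	 * SMRAM at FED8h/FEE0h and, if L2 was running, re-enter guest mode.
	 */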
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *nested_vmcb;
	struct kvm_host_map map;
	u64 guest;
	u64 vmcb;

	guest = GET_SMSTATE(u64, smstate, 0x7ed8);
	vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);

	if (guest) {
		if (kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb), &map) == -EINVAL)
			return 1;
		nested_vmcb = map.hva;
		enter_svm_guest_mode(svm, vmcb, nested_vmcb, &map);
	}
	return 0;
}

static int enable_smi_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!gif_set(svm)) {
		if (vgif_enabled(svm))
			set_intercept(svm, INTERCEPT_STGI);
		/* STGI will cause a vm exit */
		return 1;
	}
	return 0;
}

static bool svm_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
{
	unsigned long cr4 = kvm_read_cr4(vcpu);
	bool smep = cr4 & X86_CR4_SMEP;
	bool smap = cr4 & X86_CR4_SMAP;
	bool is_user = svm_get_cpl(vcpu) == 3;

	/*
	 * If RIP is invalid, go ahead with emulation which will cause an
	 * internal error exit.
	 */
	if (!kvm_vcpu_gfn_to_memslot(vcpu, kvm_rip_read(vcpu) >> PAGE_SHIFT))
		return true;

	/*
	 * Detect and work around Erratum 1096 Fam_17h_00_0Fh.
	 *
	 * Erratum:
	 * When the CPU raises #NPF on a guest data access and vCPU CR4.SMAP=1,
	 * it is possible that the CPU microcode implementing DecodeAssist
	 * will fail to read the bytes of the instruction which caused the
	 * #NPF. In this case, the GuestIntrBytes field of the VMCB on a
	 * VMEXIT will incorrectly return 0 instead of the correct guest
	 * instruction bytes.
	 *
	 * This happens because the CPU microcode reading the instruction
	 * bytes uses a special opcode which attempts to read data using
	 * CPL=0 privileges. The microcode reads CS:RIP and, if it hits a
	 * SMAP fault, gives up and returns no instruction bytes.
	 *
	 * Detection:
	 * We reach here when the CPU supports DecodeAssist, raised #NPF, and
	 * returned 0 in the GuestIntrBytes field of the VMCB.
	 * First, the erratum can only be triggered when vCPU CR4.SMAP=1.
	 * Second, if vCPU CR4.SMEP=1, the erratum can only be triggered when
	 * vCPU CPL==3 (because otherwise the guest would have raised a SMEP
	 * fault instead of #NPF).
	 * Otherwise, with vCPU CR4.SMEP=0, the erratum can be triggered at
	 * any vCPU CPL.
	 * As most guests enable SMAP if they have also enabled SMEP, use the
	 * above logic to minimize false positives when detecting the erratum
	 * while still preserving semantic correctness in all cases.
	 *
	 * Workaround:
	 * To determine what instruction the guest was executing, the
	 * hypervisor has to decode the instruction at the instruction
	 * pointer.
	 *
	 * In a non-SEV guest, the hypervisor is able to read the guest
	 * memory to decode the instruction pointer when insn_len is zero,
	 * so we return true to indicate that decoding is possible.
	 *
	 * But in an SEV guest, the guest memory is encrypted with a
	 * guest-specific key and the hypervisor cannot decode the
	 * instruction pointer, so the erratum cannot be worked around.
	 * Print an error and request that the guest be killed.
	 */
	if (smap && (!smep || is_user)) {
		if (!sev_guest(vcpu->kvm))
			return true;

		pr_err_ratelimited("KVM: SEV Guest triggered AMD Erratum 1096\n");
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
	}

	return false;
}

static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * TODO: The last condition also latches INIT signals on the vCPU
	 * when the vCPU is in guest mode and vmcb12 defines an intercept on
	 * INIT. To properly emulate the INIT intercept,
	 * svm_check_nested_events() should call nested_svm_vmexit()
	 * if an INIT signal is pending.
	 */
	return !gif_set(svm) ||
	       (svm->vmcb->control.intercept & (1ULL << INTERCEPT_INIT));
}

static void svm_vm_destroy(struct kvm *kvm)
{
	avic_vm_destroy(kvm);
	sev_vm_destroy(kvm);
}

static int svm_vm_init(struct kvm *kvm)
{
	if (avic) {
		int ret = avic_vm_init(kvm);
		if (ret)
			return ret;
	}

	kvm_apicv_init(kvm, avic);
	return 0;
}

static struct kvm_x86_ops svm_x86_ops __initdata = {
	.hardware_unsetup = svm_hardware_teardown,
	.hardware_enable = svm_hardware_enable,
	.hardware_disable = svm_hardware_disable,
	.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
	.has_emulated_msr = svm_has_emulated_msr,

	.vcpu_create = svm_create_vcpu,
	.vcpu_free = svm_free_vcpu,
	.vcpu_reset = svm_vcpu_reset,

	.vm_size = sizeof(struct kvm_svm),
	.vm_init = svm_vm_init,
	.vm_destroy = svm_vm_destroy,

	.prepare_guest_switch = svm_prepare_guest_switch,
	.vcpu_load = svm_vcpu_load,
	.vcpu_put = svm_vcpu_put,
	.vcpu_blocking = svm_vcpu_blocking,
	.vcpu_unblocking = svm_vcpu_unblocking,

	.update_bp_intercept = update_bp_intercept,
	.get_msr_feature = svm_get_msr_feature,
	.get_msr = svm_get_msr,
	.set_msr = svm_set_msr,
	.get_segment_base = svm_get_segment_base,
	.get_segment = svm_get_segment,
	.set_segment = svm_set_segment,
	.get_cpl = svm_get_cpl,
	.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
	.decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
	.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
	.set_cr0 = svm_set_cr0,
	.set_cr4 = svm_set_cr4,
	.set_efer = svm_set_efer,
	.get_idt = svm_get_idt,
	.set_idt = svm_set_idt,
	.get_gdt = svm_get_gdt,
	.set_gdt = svm_set_gdt,
	.set_dr7 = svm_set_dr7,
	.sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
	.cache_reg = svm_cache_reg,
	.get_rflags = svm_get_rflags,
	.set_rflags = svm_set_rflags,

	.tlb_flush_all = svm_flush_tlb,
	.tlb_flush_current = svm_flush_tlb,
	.tlb_flush_gva = svm_flush_tlb_gva,
	.tlb_flush_guest = svm_flush_tlb,

	.run = svm_vcpu_run,
	.handle_exit = handle_exit,
	.skip_emulated_instruction = skip_emulated_instruction,
	.update_emulated_instruction = NULL,
	.set_interrupt_shadow = svm_set_interrupt_shadow,
	.get_interrupt_shadow = svm_get_interrupt_shadow,
	.patch_hypercall = svm_patch_hypercall,
	.set_irq = svm_set_irq,
	.set_nmi = svm_inject_nmi,
	.queue_exception = svm_queue_exception,
	.cancel_injection = svm_cancel_injection,
	.interrupt_allowed = svm_interrupt_allowed,
	.nmi_allowed = svm_nmi_allowed,
	.get_nmi_mask = svm_get_nmi_mask,
	.set_nmi_mask = svm_set_nmi_mask,
	.enable_nmi_window = enable_nmi_window,
	.enable_irq_window = enable_irq_window,
	.update_cr8_intercept = update_cr8_intercept,
	.set_virtual_apic_mode = svm_set_virtual_apic_mode,
	.refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl,
	.check_apicv_inhibit_reasons = svm_check_apicv_inhibit_reasons,
	.pre_update_apicv_exec_ctrl = svm_pre_update_apicv_exec_ctrl,
	.load_eoi_exitmap = svm_load_eoi_exitmap,
	.hwapic_irr_update = svm_hwapic_irr_update,
	.hwapic_isr_update = svm_hwapic_isr_update,
	.sync_pir_to_irr = kvm_lapic_find_highest_irr,
	.apicv_post_state_restore = avic_post_state_restore,

	.set_tss_addr = svm_set_tss_addr,
	.set_identity_map_addr = svm_set_identity_map_addr,
	.get_tdp_level = get_npt_level,
	.get_mt_mask = svm_get_mt_mask,

	.get_exit_info = svm_get_exit_info,

	.cpuid_update = svm_cpuid_update,

	.has_wbinvd_exit = svm_has_wbinvd_exit,

	.write_l1_tsc_offset = svm_write_l1_tsc_offset,

	.load_mmu_pgd = svm_load_mmu_pgd,

	.check_intercept = svm_check_intercept,
	.handle_exit_irqoff = svm_handle_exit_irqoff,

	.request_immediate_exit = __kvm_request_immediate_exit,

	.sched_in = svm_sched_in,

	.pmu_ops = &amd_pmu_ops,
	.nested_ops = &svm_nested_ops,

	.deliver_posted_interrupt = svm_deliver_avic_intr,
	.dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt,
	.update_pi_irte = svm_update_pi_irte,
	.setup_mce = svm_setup_mce,

	.smi_allowed = svm_smi_allowed,
	.pre_enter_smm = svm_pre_enter_smm,
	.pre_leave_smm = svm_pre_leave_smm,
	.enable_smi_window = enable_smi_window,

	.mem_enc_op = svm_mem_enc_op,
	.mem_enc_reg_region = svm_register_enc_region,
	.mem_enc_unreg_region = svm_unregister_enc_region,

	.need_emulation_on_page_fault = svm_need_emulation_on_page_fault,

	.apic_init_signal_blocked = svm_apic_init_signal_blocked,
};

static struct kvm_x86_init_ops svm_init_ops __initdata = {
	.cpu_has_kvm_support = has_svm,
	.disabled_by_bios = is_disabled,
	.hardware_setup = svm_hardware_setup,
	.check_processor_compatibility = svm_check_processor_compat,

	.runtime_ops = &svm_x86_ops,
};

static int __init svm_init(void)
{
	return kvm_init(&svm_init_ops, sizeof(struct vcpu_svm),
			__alignof__(struct vcpu_svm), THIS_MODULE);
}

static void __exit svm_exit(void)
{
	kvm_exit();
}

module_init(svm_init)
module_exit(svm_exit)