/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/ftrace_event.h>
#include <linux/slab.h>

#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>

#include <asm/virtext.h>
#include "trace.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static const struct x86_cpu_id svm_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_SVM),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_NPT            (1 <<  0)
#define SVM_FEATURE_LBRV           (1 <<  1)
#define SVM_FEATURE_SVML           (1 <<  2)
#define SVM_FEATURE_NRIP           (1 <<  3)
#define SVM_FEATURE_TSC_RATE       (1 <<  4)
#define SVM_FEATURE_VMCB_CLEAN     (1 <<  5)
#define SVM_FEATURE_FLUSH_ASID     (1 <<  6)
#define SVM_FEATURE_DECODE_ASSIST  (1 <<  7)
#define SVM_FEATURE_PAUSE_FILTER   (1 << 10)

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

#define TSC_RATIO_RSVD          0xffffff0000000000ULL
#define TSC_RATIO_MIN		0x0000000000000001ULL
#define TSC_RATIO_MAX		0x000000ffffffffffULL

static bool erratum_383_found __read_mostly;

static const u32 host_save_user_msrs[] = {
#ifdef CONFIG_X86_64
	MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
	MSR_FS_BASE,
#endif
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
};

#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)

struct kvm_vcpu;

struct nested_state {
	struct vmcb *hsave;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb;

	/* These are the merged vectors */
	u32 *msrpm;

	/* gpa pointers to the real vectors */
	u64 vmcb_msrpm;
	u64 vmcb_iopm;

	/* A VMEXIT is required but not yet emulated */
	bool exit_required;

	/* cache for intercepts of the guest */
	u32 intercept_cr;
	u32 intercept_dr;
	u32 intercept_exceptions;
	u64 intercept;

	/* Nested Paging related state */
	u64 nested_cr3;
};

#define MSRPM_OFFSETS	16
static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;

/*
 * Set osvw_len to higher value when updated Revision Guides
 * are published and we know what the new status bits are
 */
static uint64_t osvw_len = 4, osvw_status;

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	struct vmcb *vmcb;
	unsigned long vmcb_pa;
	struct svm_cpu_data *svm_data;
	uint64_t asid_generation;
	uint64_t sysenter_esp;
	uint64_t sysenter_eip;

	u64 next_rip;

	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
	struct {
		u16 fs;
		u16 gs;
		u16 ldt;
		u64 gs_base;
	} host;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct nested_state nested;

	bool nmi_singlestep;

	unsigned int3_injected;
	unsigned long int3_rip;
	u32 apf_reason;

	u64  tsc_ratio;
};

static DEFINE_PER_CPU(u64, current_tsc_ratio);
#define TSC_RATIO_DEFAULT	0x0100000000ULL

#define MSR_INVALID			0xffffffffU

static const struct svm_direct_access_msrs {
	u32 index;   /* Index of the MSR */
	bool always; /* True if intercept is always on */
} direct_access_msrs[] = {
	{ .index = MSR_STAR,				.always = true  },
	{ .index = MSR_IA32_SYSENTER_CS,		.always = true  },
#ifdef CONFIG_X86_64
	{ .index = MSR_GS_BASE,				.always = true  },
	{ .index = MSR_FS_BASE,				.always = true  },
	{ .index = MSR_KERNEL_GS_BASE,			.always = true  },
	{ .index = MSR_LSTAR,				.always = true  },
	{ .index = MSR_CSTAR,				.always = true  },
	{ .index = MSR_SYSCALL_MASK,			.always = true  },
#endif
	{ .index = MSR_IA32_LASTBRANCHFROMIP,		.always = false },
	{ .index = MSR_IA32_LASTBRANCHTOIP,		.always = false },
	{ .index = MSR_IA32_LASTINTFROMIP,		.always = false },
	{ .index = MSR_IA32_LASTINTTOIP,		.always = false },
	{ .index = MSR_INVALID,				.always = false },
};

/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static bool npt_enabled = true;
#else
static bool npt_enabled;
#endif

/* allow nested paging (virtualized MMU) for all guests */
static int npt = true;
module_param(npt, int, S_IRUGO);

/* allow nested virtualization in KVM/SVM */
static int nested = true;
module_param(nested, int, S_IRUGO);

static void svm_flush_tlb(struct kvm_vcpu *vcpu);
static void svm_complete_interrupts(struct vcpu_svm *svm);

static int nested_svm_exit_handled(struct vcpu_svm *svm);
static int nested_svm_intercept(struct vcpu_svm *svm);
static int nested_svm_vmexit(struct vcpu_svm *svm);
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
				      bool has_error_code, u32 error_code);
static u64 __scale_tsc(u64 ratio, u64 tsc);

enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,        /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,         /* DR6, DR7 */
	VMCB_DT,         /* GDT, IDT */
	VMCB_SEG,        /* CS, DS, SS, ES, CPL */
	VMCB_CR2,        /* CR2 only */
	VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_DIRTY_MAX,
};

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))

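/*
 * VMCB clean bits: each set bit tells the CPU that the corresponding
 * block of guest state was not modified since the last VMRUN and may be
 * reused from its internal cache; clearing a bit forces a reload.
 */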
static inline void mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

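/*
 * Merge the intercept vectors requested by the nested (L1) hypervisor
 * with our own host intercepts into the active VMCB; outside guest mode
 * the host VMCB already holds the final values and nothing is merged.
 */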
static void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h;
	struct nested_state *g;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->nested.hsave->control;
	g = &svm->nested;

	c->intercept_cr = h->intercept_cr | g->intercept_cr;
	c->intercept_dr = h->intercept_dr | g->intercept_dr;
	c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
	c->intercept = h->intercept | g->intercept;
}

static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
{
	if (is_guest_mode(&svm->vcpu))
		return svm->nested.hsave;
	else
		return svm->vmcb;
}

static inline void set_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr &= ~(1U << bit);

	recalc_intercepts(svm);
}

static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	return vmcb->control.intercept_cr & (1U << bit);
}

static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr = (1 << INTERCEPT_DR0_READ)
		| (1 << INTERCEPT_DR1_READ)
		| (1 << INTERCEPT_DR2_READ)
		| (1 << INTERCEPT_DR3_READ)
		| (1 << INTERCEPT_DR4_READ)
		| (1 << INTERCEPT_DR5_READ)
		| (1 << INTERCEPT_DR6_READ)
		| (1 << INTERCEPT_DR7_READ)
		| (1 << INTERCEPT_DR0_WRITE)
		| (1 << INTERCEPT_DR1_WRITE)
		| (1 << INTERCEPT_DR2_WRITE)
		| (1 << INTERCEPT_DR3_WRITE)
		| (1 << INTERCEPT_DR4_WRITE)
		| (1 << INTERCEPT_DR5_WRITE)
		| (1 << INTERCEPT_DR6_WRITE)
		| (1 << INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr = 0;

	recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_exceptions |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_exceptions &= ~(1U << bit);

	recalc_intercepts(svm);
}

static inline void set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept |= (1ULL << bit);

	recalc_intercepts(svm);
}

static inline void clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept &= ~(1ULL << bit);

	recalc_intercepts(svm);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

static unsigned long iopm_base;

struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1:8, type:5, dpl:2, p:1;
	unsigned limit1:4, zero0:3, g:1, base2:8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);

struct svm_init_data {
	int cpu;
	int r;
};

static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

static u32 svm_msrpm_offset(u32 msr)
{
	u32 offset;
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr < msrpm_ranges[i] ||
		    msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
			continue;

		offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8 */
		offset += (i * MSRS_RANGE_SIZE);       /* add range offset */

		/* Now we have the u8 offset - but need the u32 offset */
		return offset / 4;
	}

	/* MSR not in any range */
	return MSR_INVALID;
}

#define MAX_INST_SIZE 15

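/*
 * CLGI/STGI toggle the global interrupt flag so host interrupts cannot
 * arrive while the world-switch state around VMRUN is inconsistent.
 */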
static inline void clgi(void)
{
	asm volatile (__ex(SVM_CLGI));
}

static inline void stgi(void)
{
	asm volatile (__ex(SVM_STGI));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
}

static int get_npt_level(void)
{
#ifdef CONFIG_X86_64
	return PT64_ROOT_LEVEL;
#else
	return PT32E_ROOT_LEVEL;
#endif
}

static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	vcpu->arch.efer = efer;
	if (!npt_enabled && !(efer & EFER_LMA))
		efer &= ~EFER_LME;

	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
}

static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 ret = 0;

	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
		ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
	return ret;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (mask == 0)
		svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
	else
		svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;

}

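/*
 * Advance the guest RIP past the current instruction, preferring the
 * hardware-provided next_rip (NRIPS) and falling back to the emulator
 * when the CPU did not record it.
 */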
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.next_rip != 0)
		svm->next_rip = svm->vmcb->control.next_rip;

	if (!svm->next_rip) {
		if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
				EMULATE_DONE)
			printk(KERN_DEBUG "%s: NOP\n", __func__);
		return;
	}
	if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
		printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
		       __func__, kvm_rip_read(vcpu), svm->next_rip);

	kvm_rip_write(vcpu, svm->next_rip);
	svm_set_interrupt_shadow(vcpu, 0);
}

static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code,
				bool reinject)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * If we are within a nested VM we'd better #VMEXIT and let the guest
	 * handle the exception
	 */
	if (!reinject &&
	    nested_svm_check_exception(svm, nr, has_error_code, error_code))
		return;

	if (nr == BP_VECTOR && !static_cpu_has(X86_FEATURE_NRIPS)) {
		unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);

		/*
		 * For guest debugging where we have to reinject #BP if some
		 * INT3 is guest-owned:
		 * Emulate nRIP by moving RIP forward. Will fail if injection
		 * raises a fault that is not intercepted. Still better than
		 * failing in all cases.
		 */
		skip_emulated_instruction(&svm->vcpu);
		rip = kvm_rip_read(&svm->vcpu);
		svm->int3_rip = rip + svm->vmcb->save.cs.base;
		svm->int3_injected = rip - old_rip;
	}

	svm->vmcb->control.event_inj = nr
		| SVM_EVTINJ_VALID
		| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
		| SVM_EVTINJ_TYPE_EXEPT;
	svm->vmcb->control.event_inj_err = error_code;
}

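/*
 * Enable the workaround for AMD erratum 383 by setting bit 47 of
 * MSR_AMD64_DC_CFG, as the code below does; the architectural meaning
 * of that bit is an assumption taken from the erratum's revision guide.
 */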
static void svm_init_erratum_383(void)
{
	u32 low, high;
	int err;
	u64 val;

	if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
		return;

	/* Use _safe variants to not break nested virtualization */
	val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
	if (err)
		return;

	val |= (1ULL << 47);

	low  = lower_32_bits(val);
	high = upper_32_bits(val);

	native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);

	erratum_383_found = true;
}

static void svm_init_osvw(struct kvm_vcpu *vcpu)
{
	/*
	 * Guests should see errata 400 and 415 as fixed (assuming that
	 * HLT and IO instructions are intercepted).
	 */
	vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
	vcpu->arch.osvw.status = osvw_status & ~(6ULL);

	/*
	 * By increasing VCPU's osvw.length to 3 we are telling the guest that
	 * all osvw.status bits inside that length, including bit 0 (which is
	 * reserved for erratum 298), are valid. However, if host processor's
	 * osvw_len is 0 then osvw_status[0] carries no information. We need to
	 * be conservative here and therefore we tell the guest that erratum 298
	 * is present (because we really don't know).
	 */
	if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
		vcpu->arch.osvw.status |= 1;
}

static int has_svm(void)
{
	const char *msg;

	if (!cpu_has_svm(&msg)) {
		printk(KERN_INFO "has_svm: %s\n", msg);
		return 0;
	}

	return 1;
}

static void svm_hardware_disable(void)
{
	/* Make sure we clean up behind us */
	if (static_cpu_has(X86_FEATURE_TSCRATEMSR))
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);

	cpu_svm_disable();

	amd_pmu_disable_virt();
}

static int svm_hardware_enable(void)
{

	struct svm_cpu_data *sd;
	uint64_t efer;
	struct desc_ptr gdt_descr;
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	rdmsrl(MSR_EFER, efer);
	if (efer & EFER_SVME)
		return -EBUSY;

	if (!has_svm()) {
		pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me);
		return -EINVAL;
	}
	sd = per_cpu(svm_data, me);
	if (!sd) {
		pr_err("%s: svm_data is NULL on %d\n", __func__, me);
		return -EINVAL;
	}

	sd->asid_generation = 1;
	sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	sd->next_asid = sd->max_asid + 1;

	native_store_gdt(&gdt_descr);
	gdt = (struct desc_struct *)gdt_descr.address;
	sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	wrmsrl(MSR_EFER, efer | EFER_SVME);

	wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);

	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
		__this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT);
	}


	/*
	 * Get OSVW bits.
	 *
	 * Note that it is possible to have a system with mixed processor
	 * revisions and therefore different OSVW bits. If bits are not the same
	 * on different processors then choose the worst case (i.e. if erratum
	 * is present on one processor and not on another then assume that the
	 * erratum is present everywhere).
	 */
	if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
		uint64_t len, status = 0;
		int err;

		len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
		if (!err)
			status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
						      &err);

		if (err)
			osvw_status = osvw_len = 0;
		else {
			if (len < osvw_len)
				osvw_len = len;
			osvw_status |= status;
			osvw_status &= (1ULL << osvw_len) - 1;
		}
	} else
		osvw_status = osvw_len = 0;

	svm_init_erratum_383();

	amd_pmu_enable_virt();

	return 0;
}

static void svm_cpu_uninit(int cpu)
{
	struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());

	if (!sd)
		return;

	per_cpu(svm_data, raw_smp_processor_id()) = NULL;
	__free_page(sd->save_area);
	kfree(sd);
}

static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *sd;
	int r;

	sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!sd)
		return -ENOMEM;
	sd->cpu = cpu;
	sd->save_area = alloc_page(GFP_KERNEL);
	r = -ENOMEM;
	if (!sd->save_area)
		goto err_1;

	per_cpu(svm_data, cpu) = sd;

	return 0;

err_1:
	kfree(sd);
	return r;

}

static bool valid_msr_intercept(u32 index)
{
	int i;

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
		if (direct_access_msrs[i].index == index)
			return true;

	return false;
}

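/*
 * Each MSR owns two consecutive bits in the permission map: bit 2n
 * intercepts reads and bit 2n+1 intercepts writes. Clearing a bit
 * grants the guest direct, unintercepted access to that MSR.
 */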
static void set_msr_interception(u32 *msrpm, unsigned msr,
				 int read, int write)
{
	u8 bit_read, bit_write;
	unsigned long tmp;
	u32 offset;

	/*
	 * If this warning triggers extend the direct_access_msrs list at the
	 * beginning of the file
	 */
	WARN_ON(!valid_msr_intercept(msr));

	offset    = svm_msrpm_offset(msr);
	bit_read  = 2 * (msr & 0x0f);
	bit_write = 2 * (msr & 0x0f) + 1;
	tmp       = msrpm[offset];

	BUG_ON(offset == MSR_INVALID);

	read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
	write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);

	msrpm[offset] = tmp;
}

static void svm_vcpu_init_msrpm(u32 *msrpm)
{
	int i;

	memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		if (!direct_access_msrs[i].always)
			continue;

		set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
	}
}

static void add_msr_offset(u32 offset)
{
	int i;

	for (i = 0; i < MSRPM_OFFSETS; ++i) {

		/* Offset already in list? */
		if (msrpm_offsets[i] == offset)
			return;

		/* Slot used by another offset? */
		if (msrpm_offsets[i] != MSR_INVALID)
			continue;

		/* Add offset to list */
		msrpm_offsets[i] = offset;

		return;
	}

	/*
	 * If this BUG triggers the msrpm_offsets table has an overflow. Just
	 * increase MSRPM_OFFSETS in this case.
	 */
	BUG();
}

static void init_msrpm_offsets(void)
{
	int i;

	memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		u32 offset;

		offset = svm_msrpm_offset(direct_access_msrs[i].index);
		BUG_ON(offset == MSR_INVALID);

		add_msr_offset(offset);
	}
}

static void svm_enable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.lbr_ctl = 1;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.lbr_ctl = 0;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	void *iopm_va;
	int r;

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	init_msrpm_offsets();

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
		kvm_enable_efer_bits(EFER_FFXSR);

	if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		u64 max;

		kvm_has_tsc_control = true;

		/*
		 * Make sure the user can only configure tsc_khz values that
		 * fit into a signed integer.
		 * A min value is not needed because it will always
		 * be 1 on all machines and a value of 0 is used to disable
		 * tsc-scaling for the vcpu.
		 */
		max = min(0x7fffffffULL, __scale_tsc(tsc_khz, TSC_RATIO_MAX));

		kvm_max_guest_tsc_khz = max;
	}

	if (nested) {
		printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
		kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
	}

	for_each_possible_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err;
	}

	if (!boot_cpu_has(X86_FEATURE_NPT))
		npt_enabled = false;

	if (npt_enabled && !npt) {
		printk(KERN_INFO "kvm: Nested Paging disabled\n");
		npt_enabled = false;
	}

	if (npt_enabled) {
		printk(KERN_INFO "kvm: Nested Paging enabled\n");
		kvm_enable_tdp();
	} else
		kvm_disable_tdp();

	return 0;

err:
	__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
	iopm_base = 0;
	return r;
}

static __exit void svm_hardware_unsetup(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		svm_cpu_uninit(cpu);

	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
	iopm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}

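/*
 * Scale a TSC value by a fixed-point ratio (fraction in bits 31:0,
 * integer part above), splitting the multiplication into 32-bit halves
 * so the intermediate products stay within 64 bits for the permitted
 * ratio range.
 */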
static u64 __scale_tsc(u64 ratio, u64 tsc)
{
	u64 mult, frac, _tsc;

	mult  = ratio >> 32;
	frac  = ratio & ((1ULL << 32) - 1);

	_tsc  = tsc;
	_tsc *= mult;
	_tsc += (tsc >> 32) * frac;
	_tsc += ((tsc & ((1ULL << 32) - 1)) * frac) >> 32;

	return _tsc;
}

static u64 svm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 _tsc = tsc;

	if (svm->tsc_ratio != TSC_RATIO_DEFAULT)
		_tsc = __scale_tsc(svm->tsc_ratio, tsc);

	return _tsc;
}

static void svm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 ratio;
	u64 khz;

	/* Guest TSC same frequency as host TSC? */
	if (!scale) {
		svm->tsc_ratio = TSC_RATIO_DEFAULT;
		return;
	}

	/* TSC scaling supported? */
	if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		if (user_tsc_khz > tsc_khz) {
			vcpu->arch.tsc_catchup = 1;
			vcpu->arch.tsc_always_catchup = 1;
		} else
			WARN(1, "user requested TSC rate below hardware speed\n");
		return;
	}

	khz = user_tsc_khz;

	/* TSC scaling required  - calculate ratio */
	ratio = khz << 32;
	do_div(ratio, tsc_khz);

	if (ratio == 0 || ratio & TSC_RATIO_RSVD) {
		WARN_ONCE(1, "Invalid TSC ratio - virtual-tsc-khz=%u\n",
				user_tsc_khz);
		return;
	}
	svm->tsc_ratio             = ratio;
}

static u64 svm_read_tsc_offset(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->vmcb->control.tsc_offset;
}

static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 g_tsc_offset = 0;

	if (is_guest_mode(vcpu)) {
		g_tsc_offset = svm->vmcb->control.tsc_offset -
			       svm->nested.hsave->control.tsc_offset;
		svm->nested.hsave->control.tsc_offset = offset;
	} else
		trace_kvm_write_tsc_offset(vcpu->vcpu_id,
					   svm->vmcb->control.tsc_offset,
					   offset);

	svm->vmcb->control.tsc_offset = offset + g_tsc_offset;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}

static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (host) {
		if (svm->tsc_ratio != TSC_RATIO_DEFAULT)
			WARN_ON(adjustment < 0);
		adjustment = svm_scale_tsc(vcpu, (u64)adjustment);
	}

	svm->vmcb->control.tsc_offset += adjustment;
	if (is_guest_mode(vcpu))
		svm->nested.hsave->control.tsc_offset += adjustment;
	else
		trace_kvm_write_tsc_offset(vcpu->vcpu_id,
				     svm->vmcb->control.tsc_offset - adjustment,
				     svm->vmcb->control.tsc_offset);

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}

static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
{
	u64 tsc;

	tsc = svm_scale_tsc(vcpu, native_read_tsc());

	return target_tsc - tsc;
}

static void init_vmcb(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct vmcb_save_area *save = &svm->vmcb->save;

	svm->vcpu.fpu_active = 1;
	svm->vcpu.arch.hflags = 0;

	set_cr_intercept(svm, INTERCEPT_CR0_READ);
	set_cr_intercept(svm, INTERCEPT_CR3_READ);
	set_cr_intercept(svm, INTERCEPT_CR4_READ);
	set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
	set_cr_intercept(svm, INTERCEPT_CR3_WRITE);
	set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
	set_cr_intercept(svm, INTERCEPT_CR8_WRITE);

	set_dr_intercepts(svm);

	set_exception_intercept(svm, PF_VECTOR);
	set_exception_intercept(svm, UD_VECTOR);
	set_exception_intercept(svm, MC_VECTOR);

	set_intercept(svm, INTERCEPT_INTR);
	set_intercept(svm, INTERCEPT_NMI);
	set_intercept(svm, INTERCEPT_SMI);
	set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
	set_intercept(svm, INTERCEPT_RDPMC);
	set_intercept(svm, INTERCEPT_CPUID);
	set_intercept(svm, INTERCEPT_INVD);
	set_intercept(svm, INTERCEPT_HLT);
	set_intercept(svm, INTERCEPT_INVLPG);
	set_intercept(svm, INTERCEPT_INVLPGA);
	set_intercept(svm, INTERCEPT_IOIO_PROT);
	set_intercept(svm, INTERCEPT_MSR_PROT);
	set_intercept(svm, INTERCEPT_TASK_SWITCH);
	set_intercept(svm, INTERCEPT_SHUTDOWN);
	set_intercept(svm, INTERCEPT_VMRUN);
	set_intercept(svm, INTERCEPT_VMMCALL);
	set_intercept(svm, INTERCEPT_VMLOAD);
	set_intercept(svm, INTERCEPT_VMSAVE);
	set_intercept(svm, INTERCEPT_STGI);
	set_intercept(svm, INTERCEPT_CLGI);
	set_intercept(svm, INTERCEPT_SKINIT);
	set_intercept(svm, INTERCEPT_WBINVD);
	set_intercept(svm, INTERCEPT_MONITOR);
	set_intercept(svm, INTERCEPT_MWAIT);
	set_intercept(svm, INTERCEPT_XSETBV);

	control->iopm_base_pa = iopm_base;
	control->msrpm_base_pa = __pa(svm->msrpm);
	control->int_ctl = V_INTR_MASKING_MASK;

	init_seg(&save->es);
	init_seg(&save->ss);
	init_seg(&save->ds);
	init_seg(&save->fs);
	init_seg(&save->gs);

	save->cs.selector = 0xf000;
	save->cs.base = 0xffff0000;
	/* Executable/Readable Code Segment */
	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
		SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
	save->cs.limit = 0xffff;

	save->gdtr.limit = 0xffff;
	save->idtr.limit = 0xffff;

	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

	svm_set_efer(&svm->vcpu, 0);
	save->dr6 = 0xffff0ff0;
	kvm_set_rflags(&svm->vcpu, 2);
	save->rip = 0x0000fff0;
	svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;

	/*
	 * This is the guest-visible cr0 value.
	 * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
	 */
	svm->vcpu.arch.cr0 = 0;
	(void)kvm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);

	save->cr4 = X86_CR4_PAE;
	/* rdx = ?? */

	if (npt_enabled) {
		/* Setup VMCB for Nested Paging */
		control->nested_ctl = 1;
		clr_intercept(svm, INTERCEPT_INVLPG);
		clr_exception_intercept(svm, PF_VECTOR);
		clr_cr_intercept(svm, INTERCEPT_CR3_READ);
		clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
		save->g_pat = 0x0007040600070406ULL;
		save->cr3 = 0;
		save->cr4 = 0;
	}
	svm->asid_generation = 0;

	svm->nested.vmcb = 0;
	svm->vcpu.arch.hflags = 0;

	if (boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
		control->pause_filter_count = 3000;
		set_intercept(svm, INTERCEPT_PAUSE);
	}

	mark_all_dirty(svm->vmcb);

	enable_gif(svm);
}

static void svm_vcpu_reset(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 dummy;
	u32 eax = 1;

	init_vmcb(svm);

	kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy);
	kvm_register_write(vcpu, VCPU_REGS_RDX, eax);
}

static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
{
	struct vcpu_svm *svm;
	struct page *page;
	struct page *msrpm_pages;
	struct page *hsave_page;
	struct page *nested_msrpm_pages;
	int err;

	svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!svm) {
		err = -ENOMEM;
		goto out;
	}

	svm->tsc_ratio = TSC_RATIO_DEFAULT;

	err = kvm_vcpu_init(&svm->vcpu, kvm, id);
	if (err)
		goto free_svm;

	err = -ENOMEM;
	page = alloc_page(GFP_KERNEL);
	if (!page)
		goto uninit;

	msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
	if (!msrpm_pages)
		goto free_page1;

	nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
	if (!nested_msrpm_pages)
		goto free_page2;

	hsave_page = alloc_page(GFP_KERNEL);
	if (!hsave_page)
		goto free_page3;

	svm->nested.hsave = page_address(hsave_page);

	svm->msrpm = page_address(msrpm_pages);
	svm_vcpu_init_msrpm(svm->msrpm);

	svm->nested.msrpm = page_address(nested_msrpm_pages);
	svm_vcpu_init_msrpm(svm->nested.msrpm);

	svm->vmcb = page_address(page);
	clear_page(svm->vmcb);
	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
	svm->asid_generation = 0;
	init_vmcb(svm);

	svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
				   MSR_IA32_APICBASE_ENABLE;
	if (kvm_vcpu_is_bsp(&svm->vcpu))
		svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;

	svm_init_osvw(&svm->vcpu);

	return &svm->vcpu;

free_page3:
	__free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
free_page2:
	__free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
free_page1:
	__free_page(page);
uninit:
	kvm_vcpu_uninit(&svm->vcpu);
free_svm:
	kmem_cache_free(kvm_vcpu_cache, svm);
out:
	return ERR_PTR(err);
}

static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
1287 1288
	struct vcpu_svm *svm = to_svm(vcpu);

R
1290
	__free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
1291 1292
	__free_page(virt_to_page(svm->nested.hsave));
	__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
R
1294
	kmem_cache_free(kvm_vcpu_cache, svm);
A

1297
static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
A
1299
	struct vcpu_svm *svm = to_svm(vcpu);
1300
	int i;
1301 1302

	if (unlikely(cpu != vcpu->cpu)) {
1303
		svm->asid_generation = 0;
1304
		mark_all_dirty(svm->vmcb);
1305
	}
1306

1307 1308 1309
#ifdef CONFIG_X86_64
	rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
#endif
1310 1311 1312 1313
	savesegment(fs, svm->host.fs);
	savesegment(gs, svm->host.gs);
	svm->host.ldt = kvm_read_ldt();

1314
	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
1315
		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
1316 1317

	if (static_cpu_has(X86_FEATURE_TSCRATEMSR) &&
1318 1319
	    svm->tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
		__this_cpu_write(current_tsc_ratio, svm->tsc_ratio);
1320 1321
		wrmsrl(MSR_AMD64_TSC_RATIO, svm->tsc_ratio);
	}
A

static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
1326
	struct vcpu_svm *svm = to_svm(vcpu);
1327 1328
	int i;

1329
	++vcpu->stat.host_state_reload;
1330 1331 1332 1333
	kvm_load_ldt(svm->host.ldt);
#ifdef CONFIG_X86_64
	loadsegment(fs, svm->host.fs);
	wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
1334
	load_gs_index(svm->host.gs);
1335
#else
1336
#ifdef CONFIG_X86_32_LAZY_GS
1337
	loadsegment(gs, svm->host.gs);
1338
#endif
1339
#endif
1340
	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
1341
		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
A

static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
1346
	return to_svm(vcpu)->vmcb->save.rflags;
A

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
P
        * Any change of EFLAGS.VM is accompained by a reload of SS
        * (caused by either a task switch or an inter-privilege IRET),
        * so we do not need to update the CPL here.
        */
1356
	to_svm(vcpu)->vmcb->save.rflags = rflags;
A

A
{
	switch (reg) {
	case VCPU_EXREG_PDPTR:
		BUG_ON(!npt_enabled);
1364
		load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
A
	default:
		BUG();
	}
}

1371 1372
static void svm_set_vintr(struct vcpu_svm *svm)
{
1373
	set_intercept(svm, INTERCEPT_VINTR);
1374 1375 1376 1377
}

static void svm_clear_vintr(struct vcpu_svm *svm)
{
1378
	clr_intercept(svm, INTERCEPT_VINTR);
1379 1380
}

A
{
1383
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
A
	switch (seg) {
	case VCPU_SREG_CS: return &save->cs;
	case VCPU_SREG_DS: return &save->ds;
	case VCPU_SREG_ES: return &save->es;
	case VCPU_SREG_FS: return &save->fs;
	case VCPU_SREG_GS: return &save->gs;
	case VCPU_SREG_SS: return &save->ss;
	case VCPU_SREG_TR: return &save->tr;
	case VCPU_SREG_LDTR: return &save->ldtr;
	}
	BUG();
A
A

static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	return s->base;
}

static void svm_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	var->base = s->base;
	var->limit = s->limit;
	var->selector = s->selector;
	var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
	var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
	var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
	var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
	var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
	var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
1421 1422 1423 1424 1425 1426 1427 1428 1429 1430

	/*
	 * AMD CPUs circa 2014 track the G bit for all segments except CS.
	 * However, the SVM spec states that the G bit is not observed by the
	 * CPU, and some VMware virtual CPUs drop the G bit for all segments.
	 * So let's synthesize a legal G bit for all segments, this helps
	 * running KVM nested. It also helps cross-vendor migration, because
	 * Intel's vmentry has a check on the 'G' bit.
	 */
	var->g = s->limit > 0xfffff;
1431

J
	 * AMD's VMCB does not have an explicit unusable field, so emulate it
1434 1435 1436 1437
	 * for cross vendor migration purposes by "not present"
	 */
	var->unusable = !var->present || (var->type == 0);

1438 1439 1440 1441 1442 1443
	switch (seg) {
	case VCPU_SREG_TR:
		/*
		 * Work around a bug where the busy flag in the tr selector
		 * isn't exposed
		 */
1444
		var->type |= 0x2;
1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459
		break;
	case VCPU_SREG_DS:
	case VCPU_SREG_ES:
	case VCPU_SREG_FS:
	case VCPU_SREG_GS:
		/*
		 * The accessed bit must always be set in the segment
		 * descriptor cache, although it can be cleared in the
		 * descriptor, the cached bit always remains at 1. Since
		 * Intel has a check on this, set it here to support
		 * cross-vendor migration.
		 */
		if (!var->unusable)
			var->type |= 0x1;
		break;
1460
	case VCPU_SREG_SS:
J
		 * On AMD CPUs sometimes the DB bit in the segment
1463 1464 1465 1466 1467 1468
		 * descriptor is left as 1, although the whole segment has
		 * been made unusable. Clear it here to pass an Intel VMX
		 * entry check when cross vendor migrating.
		 */
		if (var->unusable)
			var->db = 0;
J
1470
		break;
1471
	}
A

1474 1475 1476 1477 1478 1479 1480
static int svm_get_cpl(struct kvm_vcpu *vcpu)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	return save->cpl;
}

1481
static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
A
1483 1484
	struct vcpu_svm *svm = to_svm(vcpu);

1485 1486
	dt->size = svm->vmcb->save.idtr.limit;
	dt->address = svm->vmcb->save.idtr.base;
A

1489
static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
A
1491 1492
	struct vcpu_svm *svm = to_svm(vcpu);

1493 1494
	svm->vmcb->save.idtr.limit = dt->size;
	svm->vmcb->save.idtr.base = dt->address ;
1495
	mark_dirty(svm->vmcb, VMCB_DT);
A

1498
static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
A
1500 1501
	struct vcpu_svm *svm = to_svm(vcpu);

1502 1503
	dt->size = svm->vmcb->save.gdtr.limit;
	dt->address = svm->vmcb->save.gdtr.base;
A

1506
static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
A
1508 1509
	struct vcpu_svm *svm = to_svm(vcpu);

1510 1511
	svm->vmcb->save.gdtr.limit = dt->size;
	svm->vmcb->save.gdtr.base = dt->address ;
1512
	mark_dirty(svm->vmcb, VMCB_DT);
A

1515 1516 1517 1518
static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
{
}

1519 1520 1521 1522
static void svm_decache_cr3(struct kvm_vcpu *vcpu)
{
}

static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}

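/*
 * Intercept CR0 accesses only while the CR0 value the guest observes
 * differs from the one in hardware (e.g. a TS bit forced by lazy FPU
 * switching); otherwise let the accesses through unintercepted.
 */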
static void update_cr0_intercept(struct vcpu_svm *svm)
{
	ulong gcr0 = svm->vcpu.arch.cr0;
	u64 *hcr0 = &svm->vmcb->save.cr0;

	if (!svm->vcpu.fpu_active)
		*hcr0 |= SVM_CR0_SELECTIVE_MASK;
	else
		*hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
			| (gcr0 & SVM_CR0_SELECTIVE_MASK);

	mark_dirty(svm->vmcb, VMCB_CR);

	if (gcr0 == *hcr0 && svm->vcpu.fpu_active) {
		clr_cr_intercept(svm, INTERCEPT_CR0_READ);
		clr_cr_intercept(svm, INTERCEPT_CR0_WRITE);
	} else {
		set_cr_intercept(svm, INTERCEPT_CR0_READ);
		set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
	}
}

static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	struct vcpu_svm *svm = to_svm(vcpu);

#ifdef CONFIG_X86_64
	if (vcpu->arch.efer & EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
			vcpu->arch.efer |= EFER_LMA;
			svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
		}

		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
			vcpu->arch.efer &= ~EFER_LMA;
			svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
		}
	}
#endif
	vcpu->arch.cr0 = cr0;

	if (!npt_enabled)
		cr0 |= X86_CR0_PG | X86_CR0_WP;

	if (!vcpu->fpu_active)
		cr0 |= X86_CR0_TS;
	/*
	 * re-enable caching here because the QEMU bios
	 * does not do it - this results in some delay at
	 * reboot
	 */
	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
	svm->vmcb->save.cr0 = cr0;
	mark_dirty(svm->vmcb, VMCB_CR);
	update_cr0_intercept(svm);
}

static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
	unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;

	if (cr4 & X86_CR4_VMXE)
		return 1;

	if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
		svm_flush_tlb(vcpu);

	vcpu->arch.cr4 = cr4;
	if (!npt_enabled)
		cr4 |= X86_CR4_PAE;
	cr4 |= host_cr4_mce;
	to_svm(vcpu)->vmcb->save.cr4 = cr4;
	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
	return 0;
}

static void svm_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	s->base = var->base;
	s->limit = var->limit;
	s->selector = var->selector;
	if (var->unusable)
		s->attrib = 0;
	else {
		s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
		s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
		s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
		s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
		s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
		s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
		s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
		s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
	}

	/*
	 * This is always accurate, except if SYSRET returned to a segment
	 * with SS.DPL != 3.  Intel does not have this quirk, and always
	 * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it
	 * would entail passing the CPL to userspace and back.
	 */
	if (seg == VCPU_SREG_SS)
		svm->vmcb->save.cpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;

	mark_dirty(svm->vmcb, VMCB_SEG);
}

static void update_db_bp_intercept(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	clr_exception_intercept(svm, DB_VECTOR);
	clr_exception_intercept(svm, BP_VECTOR);

	if (svm->nmi_singlestep)
		set_exception_intercept(svm, DB_VECTOR);

	if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
		if (vcpu->guest_debug &
		    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
			set_exception_intercept(svm, DB_VECTOR);
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
			set_exception_intercept(svm, BP_VECTOR);
	} else
		vcpu->guest_debug = 0;
}

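/*
 * Hand out the next free ASID; once the pool is exhausted, start a new
 * generation and request a full TLB flush on the next VMRUN so that no
 * translations from recycled ASIDs survive.
 */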
static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
{
	if (sd->next_asid > sd->max_asid) {
		++sd->asid_generation;
		sd->next_asid = 1;
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}

	svm->asid_generation = sd->asid_generation;
	svm->vmcb->control.asid = sd->next_asid++;

	mark_dirty(svm->vmcb, VMCB_ASID);
}

static u64 svm_get_dr6(struct kvm_vcpu *vcpu)
{
	return to_svm(vcpu)->vmcb->save.dr6;
}

static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.dr6 = value;
	mark_dirty(svm->vmcb, VMCB_DR);
}

static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	get_debugreg(vcpu->arch.db[0], 0);
	get_debugreg(vcpu->arch.db[1], 1);
	get_debugreg(vcpu->arch.db[2], 2);
	get_debugreg(vcpu->arch.db[3], 3);
	vcpu->arch.dr6 = svm_get_dr6(vcpu);
	vcpu->arch.dr7 = svm->vmcb->save.dr7;

	vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
	set_dr_intercepts(svm);
}

static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.dr7 = value;
	mark_dirty(svm->vmcb, VMCB_DR);
}

static int pf_interception(struct vcpu_svm *svm)
{
	u64 fault_address = svm->vmcb->control.exit_info_2;
	u32 error_code;
	int r = 1;

	switch (svm->apf_reason) {
	default:
		error_code = svm->vmcb->control.exit_info_1;

		trace_kvm_page_fault(fault_address, error_code);
		if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
			kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
		r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
			svm->vmcb->control.insn_bytes,
			svm->vmcb->control.insn_len);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		svm->apf_reason = 0;
		local_irq_disable();
		kvm_async_pf_task_wait(fault_address);
		local_irq_enable();
		break;
	case KVM_PV_REASON_PAGE_READY:
		svm->apf_reason = 0;
		local_irq_disable();
		kvm_async_pf_task_wake(fault_address);
		local_irq_enable();
		break;
	}
	return r;
}

static int db_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;

	if (!(svm->vcpu.guest_debug &
	      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
		!svm->nmi_singlestep) {
		kvm_queue_exception(&svm->vcpu, DB_VECTOR);
		return 1;
	}

	if (svm->nmi_singlestep) {
		svm->nmi_singlestep = false;
		if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
			svm->vmcb->save.rflags &=
				~(X86_EFLAGS_TF | X86_EFLAGS_RF);
		update_db_bp_intercept(&svm->vcpu);
	}

	if (svm->vcpu.guest_debug &
	    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
		kvm_run->exit_reason = KVM_EXIT_DEBUG;
		kvm_run->debug.arch.pc =
			svm->vmcb->save.cs.base + svm->vmcb->save.rip;
		kvm_run->debug.arch.exception = DB_VECTOR;
		return 0;
	}

	return 1;
}

static int bp_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;

	kvm_run->exit_reason = KVM_EXIT_DEBUG;
	kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
	kvm_run->debug.arch.exception = BP_VECTOR;
	return 0;
}

static int ud_interception(struct vcpu_svm *svm)
{
	int er;

	er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
	if (er != EMULATE_DONE)
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
	return 1;
}

static void svm_fpu_activate(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	clr_exception_intercept(svm, NM_VECTOR);

	svm->vcpu.fpu_active = 1;
	update_cr0_intercept(svm);
}

static int nm_interception(struct vcpu_svm *svm)
{
	svm_fpu_activate(&svm->vcpu);
	return 1;
}

static bool is_erratum_383(void)
{
	int err, i;
	u64 value;

	if (!erratum_383_found)
		return false;

	value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
	if (err)
		return false;

	/* Bit 62 may or may not be set for this mce */
	value &= ~(1ULL << 62);

	if (value != 0xb600000000010015ULL)
		return false;

	/* Clear MCi_STATUS registers */
	for (i = 0; i < 6; ++i)
		native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);

	value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
	if (!err) {
		u32 low, high;

		value &= ~(1ULL << 2);
		low    = lower_32_bits(value);
		high   = upper_32_bits(value);

		native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
	}

	/* Flush tlb to evict multi-match entries */
	__flush_tlb_all();

	return true;
}

static void svm_handle_mce(struct vcpu_svm *svm)
{
	if (is_erratum_383()) {
		/*
		 * Erratum 383 triggered. Guest state is corrupt so kill the
		 * guest.
		 */
		pr_err("KVM: Guest triggered AMD Erratum 383\n");

		kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);

		return;
	}

1861 1862 1863 1864 1865 1866 1867 1868
	/*
	 * On an #MC intercept the MCE handler is not called automatically in
	 * the host. So do it by hand here.
	 */
	asm volatile (
		"int $0x12\n");
	/* not sure if we ever come back to this point */

1869 1870 1871 1872 1873
	return;
}

static int mc_interception(struct vcpu_svm *svm)
{
1874 1875 1876
	return 1;
}

static int shutdown_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;

	/*
	 * VMCB is undefined after a SHUTDOWN intercept
	 * so reinitialize it.
	 */
	clear_page(svm->vmcb);
	init_vmcb(svm);

	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}

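/*
 * Illustrative note (summarized from the decoding below): for IOIO exits
 * exit_info_1 packs the access type (IN/OUT, string) in its low bits, the
 * operand size in the SVM_IOIO_SIZE field and the port number in bits
 * 16-31, while exit_info_2 holds the RIP of the next instruction.
 */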
static int io_interception(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
	int size, in, string;
	unsigned port;

	++svm->vcpu.stat.io_exits;
	string = (io_info & SVM_IOIO_STR_MASK) != 0;
	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
	if (string || in)
		return emulate_instruction(vcpu, 0) == EMULATE_DONE;

	port = io_info >> 16;
	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
	svm->next_rip = svm->vmcb->control.exit_info_2;
	skip_emulated_instruction(&svm->vcpu);

	return kvm_fast_pio_out(vcpu, size, port);
}

static int nmi_interception(struct vcpu_svm *svm)
{
	return 1;
}

static int intr_interception(struct vcpu_svm *svm)
{
	++svm->vcpu.stat.irq_exits;
	return 1;
}

static int nop_on_interception(struct vcpu_svm *svm)
{
	return 1;
}

static int halt_interception(struct vcpu_svm *svm)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
	skip_emulated_instruction(&svm->vcpu);
	return kvm_emulate_halt(&svm->vcpu);
}

static int vmmcall_interception(struct vcpu_svm *svm)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);
	kvm_emulate_hypercall(&svm->vcpu);
	return 1;
}

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->nested.nested_cr3;
}

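/*
 * Illustrative note (not in the original source): in PAE mode the four
 * PDPTEs live at cr3 + index * 8 inside a single guest page, which is why
 * the helper below reads 8 bytes at offset_in_page(cr3) + index * 8.
 */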
static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr3 = svm->nested.nested_cr3;
	u64 pdpte;
	int ret;

	ret = kvm_read_guest_page(vcpu->kvm, gpa_to_gfn(cr3), &pdpte,
				  offset_in_page(cr3) + index * 8, 8);
	if (ret)
		return 0;
	return pdpte;
}

static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
				   unsigned long root)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.nested_cr3 = root;
	mark_dirty(svm->vmcb, VMCB_NPT);
	svm_flush_tlb(vcpu);
}

static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
				       struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
		/*
		 * TODO: track the cause of the nested page fault, and
		 * correctly fill in the high bits of exit_info_1.
		 */
		svm->vmcb->control.exit_code = SVM_EXIT_NPF;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = (1ULL << 32);
		svm->vmcb->control.exit_info_2 = fault->address;
	}

	svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
	svm->vmcb->control.exit_info_1 |= fault->error_code;

	/*
	 * The present bit is always zero for page structure faults on real
	 * hardware.
	 */
	if (svm->vmcb->control.exit_info_1 & (2ULL << 32))
		svm->vmcb->control.exit_info_1 &= ~1;

	nested_svm_vmexit(svm);
}

static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
	kvm_init_shadow_mmu(vcpu, &vcpu->arch.mmu);

	vcpu->arch.mmu.set_cr3           = nested_svm_set_tdp_cr3;
	vcpu->arch.mmu.get_cr3           = nested_svm_get_tdp_cr3;
	vcpu->arch.mmu.get_pdptr         = nested_svm_get_tdp_pdptr;
	vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
	vcpu->arch.mmu.shadow_root_level = get_npt_level();
	vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.walk_mmu = &vcpu->arch.mmu;
}

static int nested_svm_check_permissions(struct vcpu_svm *svm)
{
	if (!(svm->vcpu.arch.efer & EFER_SVME)
	    || !is_paging(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	if (svm->vmcb->save.cpl) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	return 0;
}

static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
				      bool has_error_code, u32 error_code)
{
	int vmexit;

	if (!is_guest_mode(&svm->vcpu))
		return 0;

	svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
	svm->vmcb->control.exit_code_hi = 0;
	svm->vmcb->control.exit_info_1 = error_code;
	svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;

	vmexit = nested_svm_intercept(svm);
	if (vmexit == NESTED_EXIT_DONE)
		svm->nested.exit_required = true;

	return vmexit;
}

/* This function returns true if it is safe to enable the irq window */
static inline bool nested_svm_intr(struct vcpu_svm *svm)
{
	if (!is_guest_mode(&svm->vcpu))
		return true;

	if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
		return true;

	if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
		return false;

	/*
	 * If a vmexit was already requested (by an intercepted exception,
	 * for instance), do not overwrite it with an "external interrupt"
	 * vmexit.
	 */
	if (svm->nested.exit_required)
		return false;

	svm->vmcb->control.exit_code   = SVM_EXIT_INTR;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	if (svm->nested.intercept & 1ULL) {
		/*
		 * The #vmexit can't be emulated here directly because this
		 * code path runs with irqs and preemption disabled. A
		 * #vmexit emulation might sleep. Only signal request for
		 * the #vmexit here.
		 */
		svm->nested.exit_required = true;
		trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
		return false;
	}

	return true;
}

/* This function returns true if it is safe to enable the nmi window */
static inline bool nested_svm_nmi(struct vcpu_svm *svm)
{
	if (!is_guest_mode(&svm->vcpu))
		return true;

	if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
		return true;

	svm->vmcb->control.exit_code = SVM_EXIT_NMI;
	svm->nested.exit_required = true;

	return false;
}

static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
{
	struct page *page;

	might_sleep();

	page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT);
	if (is_error_page(page))
		goto error;

	*_page = page;

	return kmap(page);

error:
	kvm_inject_gp(&svm->vcpu, 0);

	return NULL;
}

static void nested_svm_unmap(struct page *page)
{
	kunmap(page);
	kvm_release_page_dirty(page);
}

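/*
 * Worked example (illustrative, not from the original source): the nested
 * IOPM stores one permission bit per I/O port.  For an access to port
 * 0x3f9 with size 2, the function below computes gpa = vmcb_iopm + 127
 * (0x3f9 / 8), start_bit = 1 (0x3f9 % 8) and mask = 0b0110, so bits 1 and
 * 2 of that IOPM byte decide whether the exit is reflected to L1.
 */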
static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
	unsigned port, size, iopm_len;
	u16 val, mask;
	u8 start_bit;
	u64 gpa;

	if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
		return NESTED_EXIT_HOST;

	port = svm->vmcb->control.exit_info_1 >> 16;
	size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
		SVM_IOIO_SIZE_SHIFT;
	gpa  = svm->nested.vmcb_iopm + (port / 8);
	start_bit = port % 8;
	iopm_len = (start_bit + size > 8) ? 2 : 1;
	mask = (0xf >> (4 - size)) << start_bit;
	val = 0;

	if (kvm_read_guest(svm->vcpu.kvm, gpa, &val, iopm_len))
		return NESTED_EXIT_DONE;

	return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

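/*
 * Worked example (illustrative, not from the original source): the MSR
 * permission map uses two bits per MSR, one for reads and one for writes.
 * For a write to MSR 0xC0000082, msr & 0xf = 2, so the function below
 * tests bit 2 * 2 + 1 = 5 of the 32-bit word selected by
 * svm_msrpm_offset().
 */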
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
	u32 offset, msr, value;
	int write, mask;

	if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
		return NESTED_EXIT_HOST;

	msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	offset = svm_msrpm_offset(msr);
	write  = svm->vmcb->control.exit_info_1 & 1;
	mask   = 1 << ((2 * (msr & 0xf)) + write);

	if (offset == MSR_INVALID)
		return NESTED_EXIT_DONE;

	/* Offset is in 32 bit units but we need it in 8 bit units */
	offset *= 4;

	if (kvm_read_guest(svm->vcpu.kvm, svm->nested.vmcb_msrpm + offset, &value, 4))
		return NESTED_EXIT_DONE;

	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

static int nested_svm_exit_special(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;

	switch (exit_code) {
	case SVM_EXIT_INTR:
	case SVM_EXIT_NMI:
	case SVM_EXIT_EXCP_BASE + MC_VECTOR:
		return NESTED_EXIT_HOST;
	case SVM_EXIT_NPF:
		/* For now we are always handling NPFs when using them */
		if (npt_enabled)
			return NESTED_EXIT_HOST;
		break;
	case SVM_EXIT_EXCP_BASE + PF_VECTOR:
		/* When we're shadowing, trap PFs, but not async PF */
		if (!npt_enabled && svm->apf_reason == 0)
			return NESTED_EXIT_HOST;
		break;
	case SVM_EXIT_EXCP_BASE + NM_VECTOR:
		nm_interception(svm);
		break;
	default:
		break;
	}

	return NESTED_EXIT_CONTINUE;
}

/*
 * If this function returns true, this #vmexit was already handled
 */
static int nested_svm_intercept(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;
	int vmexit = NESTED_EXIT_HOST;

	switch (exit_code) {
	case SVM_EXIT_MSR:
		vmexit = nested_svm_exit_handled_msr(svm);
		break;
	case SVM_EXIT_IOIO:
		vmexit = nested_svm_intercept_ioio(svm);
		break;
	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
		u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
		if (svm->nested.intercept_cr & bit)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
		u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
		if (svm->nested.intercept_dr & bit)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
		if (svm->nested.intercept_exceptions & excp_bits)
			vmexit = NESTED_EXIT_DONE;
		/* async page fault always causes vmexit */
		else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
			 svm->apf_reason != 0)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_ERR: {
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	default: {
		u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
		if (svm->nested.intercept & exit_bits)
			vmexit = NESTED_EXIT_DONE;
	}
	}

	return vmexit;
}

static int nested_svm_exit_handled(struct vcpu_svm *svm)
{
	int vmexit;

	vmexit = nested_svm_intercept(svm);

	if (vmexit == NESTED_EXIT_DONE)
		nested_svm_vmexit(svm);

	return vmexit;
}

static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
{
	struct vmcb_control_area *dst  = &dst_vmcb->control;
	struct vmcb_control_area *from = &from_vmcb->control;

	dst->intercept_cr         = from->intercept_cr;
	dst->intercept_dr         = from->intercept_dr;
	dst->intercept_exceptions = from->intercept_exceptions;
	dst->intercept            = from->intercept;
	dst->iopm_base_pa         = from->iopm_base_pa;
	dst->msrpm_base_pa        = from->msrpm_base_pa;
	dst->tsc_offset           = from->tsc_offset;
	dst->asid                 = from->asid;
	dst->tlb_ctl              = from->tlb_ctl;
	dst->int_ctl              = from->int_ctl;
	dst->int_vector           = from->int_vector;
	dst->int_state            = from->int_state;
	dst->exit_code            = from->exit_code;
	dst->exit_code_hi         = from->exit_code_hi;
	dst->exit_info_1          = from->exit_info_1;
	dst->exit_info_2          = from->exit_info_2;
	dst->exit_int_info        = from->exit_int_info;
	dst->exit_int_info_err    = from->exit_int_info_err;
	dst->nested_ctl           = from->nested_ctl;
	dst->event_inj            = from->event_inj;
	dst->event_inj_err        = from->event_inj_err;
	dst->nested_cr3           = from->nested_cr3;
	dst->lbr_ctl              = from->lbr_ctl;
}

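/*
 * Flow summary (illustrative, not from the original source): a nested
 * #vmexit copies the current VMCB state back into the guest's nested
 * VMCB, restores the host state that was stashed in hsave at VMRUN time,
 * leaves guest mode and reloads the MMU.
 */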
static int nested_svm_vmexit(struct vcpu_svm *svm)
{
	struct vmcb *nested_vmcb;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;
	struct page *page;

	trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
				       vmcb->control.exit_info_1,
				       vmcb->control.exit_info_2,
				       vmcb->control.exit_int_info,
				       vmcb->control.exit_int_info_err,
				       KVM_ISA_SVM);

	nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
	if (!nested_vmcb)
		return 1;

	/* Exit Guest-Mode */
	leave_guest_mode(&svm->vcpu);
	svm->nested.vmcb = 0;

	/* Give the current vmcb to the guest */
	disable_gif(svm);

	nested_vmcb->save.es     = vmcb->save.es;
	nested_vmcb->save.cs     = vmcb->save.cs;
	nested_vmcb->save.ss     = vmcb->save.ss;
	nested_vmcb->save.ds     = vmcb->save.ds;
	nested_vmcb->save.gdtr   = vmcb->save.gdtr;
	nested_vmcb->save.idtr   = vmcb->save.idtr;
	nested_vmcb->save.efer   = svm->vcpu.arch.efer;
	nested_vmcb->save.cr0    = kvm_read_cr0(&svm->vcpu);
	nested_vmcb->save.cr3    = kvm_read_cr3(&svm->vcpu);
	nested_vmcb->save.cr2    = vmcb->save.cr2;
	nested_vmcb->save.cr4    = svm->vcpu.arch.cr4;
	nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
	nested_vmcb->save.rip    = vmcb->save.rip;
	nested_vmcb->save.rsp    = vmcb->save.rsp;
	nested_vmcb->save.rax    = vmcb->save.rax;
	nested_vmcb->save.dr7    = vmcb->save.dr7;
	nested_vmcb->save.dr6    = vmcb->save.dr6;
	nested_vmcb->save.cpl    = vmcb->save.cpl;

	nested_vmcb->control.int_ctl           = vmcb->control.int_ctl;
	nested_vmcb->control.int_vector        = vmcb->control.int_vector;
	nested_vmcb->control.int_state         = vmcb->control.int_state;
	nested_vmcb->control.exit_code         = vmcb->control.exit_code;
	nested_vmcb->control.exit_code_hi      = vmcb->control.exit_code_hi;
	nested_vmcb->control.exit_info_1       = vmcb->control.exit_info_1;
	nested_vmcb->control.exit_info_2       = vmcb->control.exit_info_2;
	nested_vmcb->control.exit_int_info     = vmcb->control.exit_int_info;
	nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;
	nested_vmcb->control.next_rip          = vmcb->control.next_rip;

	/*
	 * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
	 * to make sure that we do not lose injected events. So check event_inj
	 * here and copy it to exit_int_info if it is valid.
	 * Exit_int_info and event_inj can't be both valid because the case
	 * below only happens on a VMRUN instruction intercept which has
	 * no valid exit_int_info set.
	 */
	if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
		struct vmcb_control_area *nc = &nested_vmcb->control;

		nc->exit_int_info     = vmcb->control.event_inj;
		nc->exit_int_info_err = vmcb->control.event_inj_err;
	}

	nested_vmcb->control.tlb_ctl           = 0;
	nested_vmcb->control.event_inj         = 0;
	nested_vmcb->control.event_inj_err     = 0;

	/* We always set V_INTR_MASKING and remember the old value in hflags */
	if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
		nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;

	/* Restore the original control entries */
	copy_vmcb_control_area(vmcb, hsave);

	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	svm->nested.nested_cr3 = 0;

	/* Restore selected save entries */
	svm->vmcb->save.es = hsave->save.es;
	svm->vmcb->save.cs = hsave->save.cs;
	svm->vmcb->save.ss = hsave->save.ss;
	svm->vmcb->save.ds = hsave->save.ds;
	svm->vmcb->save.gdtr = hsave->save.gdtr;
	svm->vmcb->save.idtr = hsave->save.idtr;
	kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
	svm_set_efer(&svm->vcpu, hsave->save.efer);
	svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
	svm_set_cr4(&svm->vcpu, hsave->save.cr4);
	if (npt_enabled) {
		svm->vmcb->save.cr3 = hsave->save.cr3;
		svm->vcpu.arch.cr3 = hsave->save.cr3;
	} else {
		(void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
	}
	kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
	kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
	kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
	svm->vmcb->save.dr7 = 0;
	svm->vmcb->save.cpl = 0;
	svm->vmcb->control.exit_int_info = 0;

	mark_all_dirty(svm->vmcb);

	nested_svm_unmap(page);

	nested_svm_uninit_mmu_context(&svm->vcpu);
	kvm_mmu_reset_context(&svm->vcpu);
	kvm_mmu_load(&svm->vcpu);

	return 0;
}

static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
	/*
	 * This function merges the msr permission bitmaps of kvm and the
	 * nested vmcb. It is optimized in that it only merges the parts where
	 * the kvm msr permission bitmap may contain zero bits
	 */
	int i;

	if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
		return true;

	for (i = 0; i < MSRPM_OFFSETS; i++) {
		u32 value, p;
		u64 offset;

		if (msrpm_offsets[i] == 0xffffffff)
			break;

		p      = msrpm_offsets[i];
		offset = svm->nested.vmcb_msrpm + (p * 4);

		if (kvm_read_guest(svm->vcpu.kvm, offset, &value, 4))
			return false;

		svm->nested.msrpm[p] = svm->msrpm[p] | value;
	}

	svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);

	return true;
}

static bool nested_vmcb_checks(struct vmcb *vmcb)
{
	if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
		return false;

	if (vmcb->control.asid == 0)
		return false;

	if (vmcb->control.nested_ctl && !npt_enabled)
		return false;

	return true;
}

static bool nested_svm_vmrun(struct vcpu_svm *svm)
{
	struct vmcb *nested_vmcb;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;
	struct page *page;
	u64 vmcb_gpa;

	vmcb_gpa = svm->vmcb->save.rax;

	nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
	if (!nested_vmcb)
		return false;

	if (!nested_vmcb_checks(nested_vmcb)) {
		nested_vmcb->control.exit_code    = SVM_EXIT_ERR;
		nested_vmcb->control.exit_code_hi = 0;
		nested_vmcb->control.exit_info_1  = 0;
		nested_vmcb->control.exit_info_2  = 0;

		nested_svm_unmap(page);

		return false;
	}

	trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
			       nested_vmcb->save.rip,
			       nested_vmcb->control.int_ctl,
			       nested_vmcb->control.event_inj,
			       nested_vmcb->control.nested_ctl);

	trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
				    nested_vmcb->control.intercept_cr >> 16,
				    nested_vmcb->control.intercept_exceptions,
				    nested_vmcb->control.intercept);

	/* Clear internal status */
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	/*
	 * Save the old vmcb, so we don't need to pick what we save, but can
	 * restore everything when a VMEXIT occurs
	 */
	hsave->save.es     = vmcb->save.es;
	hsave->save.cs     = vmcb->save.cs;
	hsave->save.ss     = vmcb->save.ss;
	hsave->save.ds     = vmcb->save.ds;
	hsave->save.gdtr   = vmcb->save.gdtr;
	hsave->save.idtr   = vmcb->save.idtr;
	hsave->save.efer   = svm->vcpu.arch.efer;
	hsave->save.cr0    = kvm_read_cr0(&svm->vcpu);
	hsave->save.cr4    = svm->vcpu.arch.cr4;
	hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
	hsave->save.rip    = kvm_rip_read(&svm->vcpu);
	hsave->save.rsp    = vmcb->save.rsp;
	hsave->save.rax    = vmcb->save.rax;
	if (npt_enabled)
		hsave->save.cr3    = vmcb->save.cr3;
	else
		hsave->save.cr3    = kvm_read_cr3(&svm->vcpu);

	copy_vmcb_control_area(hsave, vmcb);

	if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
		svm->vcpu.arch.hflags |= HF_HIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_HIF_MASK;

	if (nested_vmcb->control.nested_ctl) {
		kvm_mmu_unload(&svm->vcpu);
		svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
		nested_svm_init_mmu_context(&svm->vcpu);
	}

	/* Load the nested guest state */
	svm->vmcb->save.es = nested_vmcb->save.es;
	svm->vmcb->save.cs = nested_vmcb->save.cs;
	svm->vmcb->save.ss = nested_vmcb->save.ss;
	svm->vmcb->save.ds = nested_vmcb->save.ds;
	svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
	svm->vmcb->save.idtr = nested_vmcb->save.idtr;
	kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
	svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
	svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
	svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
	if (npt_enabled) {
		svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
		svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
	} else
		(void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);

	/* Guest paging mode is active - reset mmu */
	kvm_mmu_reset_context(&svm->vcpu);

	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
	kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
	kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
	kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);

	/* In case we don't even reach vcpu_run, the fields are not updated */
	svm->vmcb->save.rax = nested_vmcb->save.rax;
	svm->vmcb->save.rsp = nested_vmcb->save.rsp;
	svm->vmcb->save.rip = nested_vmcb->save.rip;
	svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
	svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
	svm->vmcb->save.cpl = nested_vmcb->save.cpl;

	svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
	svm->nested.vmcb_iopm  = nested_vmcb->control.iopm_base_pa  & ~0x0fffULL;

	/* cache intercepts */
	svm->nested.intercept_cr         = nested_vmcb->control.intercept_cr;
	svm->nested.intercept_dr         = nested_vmcb->control.intercept_dr;
	svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
	svm->nested.intercept            = nested_vmcb->control.intercept;

	svm_flush_tlb(&svm->vcpu);
	svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
	if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
		svm->vcpu.arch.hflags |= HF_VINTR_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;

	if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
		/* We only want the cr8 intercept bits of the guest */
		clr_cr_intercept(svm, INTERCEPT_CR8_READ);
		clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
	}

	/* We don't want to see VMMCALLs from a nested guest */
	clr_intercept(svm, INTERCEPT_VMMCALL);

	svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
	svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
	svm->vmcb->control.int_state = nested_vmcb->control.int_state;
	svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
	svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
	svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;

	nested_svm_unmap(page);

	/* Enter Guest-Mode */
	enter_guest_mode(&svm->vcpu);

	/*
	 * Merge guest and host intercepts - must be called with vcpu in
	 * guest-mode to take effect here
	 */
	recalc_intercepts(svm);

	svm->nested.vmcb = vmcb_gpa;

	enable_gif(svm);

	mark_all_dirty(svm->vmcb);

	return true;
}

static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	to_vmcb->save.fs = from_vmcb->save.fs;
	to_vmcb->save.gs = from_vmcb->save.gs;
	to_vmcb->save.tr = from_vmcb->save.tr;
	to_vmcb->save.ldtr = from_vmcb->save.ldtr;
	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
	to_vmcb->save.star = from_vmcb->save.star;
	to_vmcb->save.lstar = from_vmcb->save.lstar;
	to_vmcb->save.cstar = from_vmcb->save.cstar;
	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}

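/*
 * Background note (general SVM behaviour, summarized): VMLOAD/VMSAVE
 * transfer the processor state that VMRUN does not switch, namely
 * FS/GS/TR/LDTR (including hidden segment state) and the syscall/sysenter
 * MSRs shadowed in the save area, which is exactly the set of fields the
 * helper above copies between VMCBs.
 */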
static int vmload_interception(struct vcpu_svm *svm)
{
	struct vmcb *nested_vmcb;
	struct page *page;

	if (nested_svm_check_permissions(svm))
		return 1;

	nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
	if (!nested_vmcb)
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
	nested_svm_unmap(page);

	return 1;
}

static int vmsave_interception(struct vcpu_svm *svm)
{
	struct vmcb *nested_vmcb;
	struct page *page;

	if (nested_svm_check_permissions(svm))
		return 1;

	nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
	if (!nested_vmcb)
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
	nested_svm_unmap(page);

	return 1;
}

static int vmrun_interception(struct vcpu_svm *svm)
{
	if (nested_svm_check_permissions(svm))
		return 1;

	/* Save rip after vmrun instruction */
	kvm_rip_write(&svm->vcpu, kvm_rip_read(&svm->vcpu) + 3);

	if (!nested_svm_vmrun(svm))
		return 1;

	if (!nested_svm_vmrun_msrpm(svm))
		goto failed;

	return 1;

failed:

	svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
	svm->vmcb->control.exit_code_hi = 0;
	svm->vmcb->control.exit_info_1  = 0;
	svm->vmcb->control.exit_info_2  = 0;

	nested_svm_vmexit(svm);

	return 1;
}

static int stgi_interception(struct vcpu_svm *svm)
{
	if (nested_svm_check_permissions(svm))
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);
	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);

	enable_gif(svm);

	return 1;
}

static int clgi_interception(struct vcpu_svm *svm)
{
	if (nested_svm_check_permissions(svm))
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	disable_gif(svm);

	/* After a CLGI no interrupts should come */
	svm_clear_vintr(svm);
	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;

	mark_dirty(svm->vmcb, VMCB_INTR);

	return 1;
}

static int invlpga_interception(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;

	trace_kvm_invlpga(svm->vmcb->save.rip, vcpu->arch.regs[VCPU_REGS_RCX],
			  vcpu->arch.regs[VCPU_REGS_RAX]);

	/* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
	kvm_mmu_invlpg(vcpu, vcpu->arch.regs[VCPU_REGS_RAX]);

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);
	return 1;
}

static int skinit_interception(struct vcpu_svm *svm)
{
	trace_kvm_skinit(svm->vmcb->save.rip, svm->vcpu.arch.regs[VCPU_REGS_RAX]);

	kvm_queue_exception(&svm->vcpu, UD_VECTOR);
	return 1;
}

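/*
 * Illustrative note (summarized from the handler below): XSETBV passes
 * the new XCR value in EDX:EAX and the XCR index in ECX, mirroring WRMSR;
 * the handler reuses kvm_read_edx_eax() and only advances RIP when
 * kvm_set_xcr() succeeds, so a failed write faults on the instruction.
 */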
static int xsetbv_interception(struct vcpu_svm *svm)
{
	u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
	u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);

	if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
		svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
		skip_emulated_instruction(&svm->vcpu);
	}

	return 1;
}

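/*
 * Illustrative note (summarized from the decoding below): for task-switch
 * exits exit_info_1 carries the target TSS selector, while exit_info_2
 * encodes how the switch was initiated (IRET/JMP reason bits plus a
 * has-error-code bit, with the error code in its low 32 bits when
 * present), which the handler turns into a TASK_SWITCH_* reason for
 * kvm_task_switch().
 */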
static int task_switch_interception(struct vcpu_svm *svm)
{
	u16 tss_selector;
	int reason;
	int int_type = svm->vmcb->control.exit_int_info &
		SVM_EXITINTINFO_TYPE_MASK;
	int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
	uint32_t type =
		svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
	uint32_t idt_v =
		svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
	bool has_error_code = false;
	u32 error_code = 0;

	tss_selector = (u16)svm->vmcb->control.exit_info_1;

	if (svm->vmcb->control.exit_info_2 &
	    (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
		reason = TASK_SWITCH_IRET;
	else if (svm->vmcb->control.exit_info_2 &
		 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
		reason = TASK_SWITCH_JMP;
	else if (idt_v)
		reason = TASK_SWITCH_GATE;
	else
		reason = TASK_SWITCH_CALL;

	if (reason == TASK_SWITCH_GATE) {
		switch (type) {
		case SVM_EXITINTINFO_TYPE_NMI:
			svm->vcpu.arch.nmi_injected = false;
			break;
		case SVM_EXITINTINFO_TYPE_EXEPT:
			if (svm->vmcb->control.exit_info_2 &
			    (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
				has_error_code = true;
				error_code =
					(u32)svm->vmcb->control.exit_info_2;
			}
			kvm_clear_exception_queue(&svm->vcpu);
			break;
		case SVM_EXITINTINFO_TYPE_INTR:
			kvm_clear_interrupt_queue(&svm->vcpu);
			break;
		default:
			break;
		}
	}

	if (reason != TASK_SWITCH_GATE ||
	    int_type == SVM_EXITINTINFO_TYPE_SOFT ||
	    (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
	     (int_vec == OF_VECTOR || int_vec == BP_VECTOR)))
		skip_emulated_instruction(&svm->vcpu);

	if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
		int_vec = -1;

	if (kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason,
				has_error_code, error_code) == EMULATE_FAIL) {
		svm->vcpu.run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		svm->vcpu.run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		svm->vcpu.run->internal.ndata = 0;
		return 0;
	}
	return 1;
}

static int cpuid_interception(struct vcpu_svm *svm)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
	kvm_emulate_cpuid(&svm->vcpu);
	return 1;
}

static int iret_interception(struct vcpu_svm *svm)
{
	++svm->vcpu.stat.nmi_window_exits;
	clr_intercept(svm, INTERCEPT_IRET);
	svm->vcpu.arch.hflags |= HF_IRET_MASK;
	svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
	return 1;
}

static int invlpg_interception(struct vcpu_svm *svm)
{
	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
		return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;

	kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
	skip_emulated_instruction(&svm->vcpu);
	return 1;
}

static int emulate_on_interception(struct vcpu_svm *svm)
{
	return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
}

static int rdpmc_interception(struct vcpu_svm *svm)
{
	int err;

	if (!static_cpu_has(X86_FEATURE_NRIPS))
		return emulate_on_interception(svm);

	err = kvm_rdpmc(&svm->vcpu);
	kvm_complete_insn_gp(&svm->vcpu, err);

	return 1;
}

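/*
 * Background note (AMD SVM semantics, summarized): the selective CR0
 * write intercept only fires when a bit other than CR0.TS or CR0.MP
 * changes.  SVM_CR0_SELECTIVE_MASK covers TS and MP, so the check below
 * masks both values with ~SVM_CR0_SELECTIVE_MASK and compares the rest.
 */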
bool check_selective_cr0_intercepted(struct vcpu_svm *svm, unsigned long val)
{
	unsigned long cr0 = svm->vcpu.arch.cr0;
	bool ret = false;
	u64 intercept;

	intercept = svm->nested.intercept;

	if (!is_guest_mode(&svm->vcpu) ||
	    (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0))))
		return false;

	cr0 &= ~SVM_CR0_SELECTIVE_MASK;
	val &= ~SVM_CR0_SELECTIVE_MASK;

	if (cr0 ^ val) {
		svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
		ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
	}

	return ret;
}

#define CR_VALID (1ULL << 63)

static int cr_interception(struct vcpu_svm *svm)
{
	int reg, cr;
	unsigned long val;
	int err;

	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
		return emulate_on_interception(svm);

	if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
		return emulate_on_interception(svm);

	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
	cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;

	err = 0;
	if (cr >= 16) { /* mov to cr */
		cr -= 16;
		val = kvm_register_read(&svm->vcpu, reg);
		switch (cr) {
		case 0:
			if (!check_selective_cr0_intercepted(svm, val))
				err = kvm_set_cr0(&svm->vcpu, val);
			else
				return 1;

			break;
		case 3:
			err = kvm_set_cr3(&svm->vcpu, val);
			break;
		case 4:
			err = kvm_set_cr4(&svm->vcpu, val);
			break;
		case 8:
			err = kvm_set_cr8(&svm->vcpu, val);
			break;
		default:
			WARN(1, "unhandled write to CR%d", cr);
			kvm_queue_exception(&svm->vcpu, UD_VECTOR);
			return 1;
		}
	} else { /* mov from cr */
		switch (cr) {
		case 0:
			val = kvm_read_cr0(&svm->vcpu);
			break;
		case 2:
			val = svm->vcpu.arch.cr2;
			break;
		case 3:
			val = kvm_read_cr3(&svm->vcpu);
			break;
		case 4:
			val = kvm_read_cr4(&svm->vcpu);
			break;
		case 8:
			val = kvm_get_cr8(&svm->vcpu);
			break;
		default:
			WARN(1, "unhandled read from CR%d", cr);
			kvm_queue_exception(&svm->vcpu, UD_VECTOR);
			return 1;
		}
		kvm_register_write(&svm->vcpu, reg, val);
	}
	kvm_complete_insn_gp(&svm->vcpu, err);

	return 1;
}

static int dr_interception(struct vcpu_svm *svm)
{
	int reg, dr;
	unsigned long val;

	if (svm->vcpu.guest_debug == 0) {
		/*
		 * No more DR vmexits; force a reload of the debug registers
		 * and reenter on this instruction.  The next vmexit will
		 * retrieve the full state of the debug registers.
		 */
		clr_dr_intercepts(svm);
		svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
		return 1;
	}

	if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
		return emulate_on_interception(svm);

	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
	dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;

	if (dr >= 16) { /* mov to DRn */
		if (!kvm_require_dr(&svm->vcpu, dr - 16))
			return 1;
		val = kvm_register_read(&svm->vcpu, reg);
		kvm_set_dr(&svm->vcpu, dr - 16, val);
	} else {
		if (!kvm_require_dr(&svm->vcpu, dr))
			return 1;
		kvm_get_dr(&svm->vcpu, dr, &val);
		kvm_register_write(&svm->vcpu, reg, val);
	}

	skip_emulated_instruction(&svm->vcpu);

	return 1;
}

static int cr8_write_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;
	int r;

	u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
	/* instruction emulation calls kvm_set_cr8() */
	r = cr_interception(svm);
	if (irqchip_in_kernel(svm->vcpu.kvm))
		return r;
	if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
		return r;
	kvm_run->exit_reason = KVM_EXIT_SET_TPR;
	return 0;
}

static u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
{
	struct vmcb *vmcb = get_host_vmcb(to_svm(vcpu));
	return vmcb->control.tsc_offset +
		svm_scale_tsc(vcpu, host_tsc);
}

static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	switch (ecx) {
	case MSR_IA32_TSC: {
		*data = svm->vmcb->control.tsc_offset +
			svm_scale_tsc(vcpu, native_read_tsc());

		break;
	}
	case MSR_STAR:
		*data = svm->vmcb->save.star;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		*data = svm->vmcb->save.lstar;
		break;
	case MSR_CSTAR:
		*data = svm->vmcb->save.cstar;
		break;
	case MSR_KERNEL_GS_BASE:
		*data = svm->vmcb->save.kernel_gs_base;
		break;
	case MSR_SYSCALL_MASK:
		*data = svm->vmcb->save.sfmask;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		*data = svm->vmcb->save.sysenter_cs;
		break;
	case MSR_IA32_SYSENTER_EIP:
		*data = svm->sysenter_eip;
		break;
	case MSR_IA32_SYSENTER_ESP:
		*data = svm->sysenter_esp;
		break;
	/*
	 * Nobody will change the following 5 values in the VMCB so we can
	 * safely return them on rdmsr. They will always be 0 until LBRV is
	 * implemented.
	 */
	case MSR_IA32_DEBUGCTLMSR:
		*data = svm->vmcb->save.dbgctl;
		break;
	case MSR_IA32_LASTBRANCHFROMIP:
		*data = svm->vmcb->save.br_from;
		break;
	case MSR_IA32_LASTBRANCHTOIP:
		*data = svm->vmcb->save.br_to;
		break;
	case MSR_IA32_LASTINTFROMIP:
		*data = svm->vmcb->save.last_excp_from;
		break;
	case MSR_IA32_LASTINTTOIP:
		*data = svm->vmcb->save.last_excp_to;
		break;
	case MSR_VM_HSAVE_PA:
		*data = svm->nested.hsave_msr;
		break;
	case MSR_VM_CR:
		*data = svm->nested.vm_cr_msr;
		break;
	case MSR_IA32_UCODE_REV:
		*data = 0x01000065;
		break;
	default:
		return kvm_get_msr_common(vcpu, ecx, data);
	}
	return 0;
}

static int rdmsr_interception(struct vcpu_svm *svm)
{
	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	u64 data;

	if (svm_get_msr(&svm->vcpu, ecx, &data)) {
		trace_kvm_msr_read_ex(ecx);
		kvm_inject_gp(&svm->vcpu, 0);
	} else {
		trace_kvm_msr_read(ecx, data);

		svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
		svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
		svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
		skip_emulated_instruction(&svm->vcpu);
	}
	return 1;
}

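/*
 * Illustrative note (summarized from the code below): once the guest has
 * set SVM_DIS in VM_CR, the LOCK and DIS bits are dropped from chg_mask,
 * so later writes can no longer flip them; and setting SVM_DIS while
 * EFER.SVME is enabled is rejected, matching the VM_CR lock semantics.
 */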
static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int svm_dis, chg_mask;

	if (data & ~SVM_VM_CR_VALID_MASK)
		return 1;

	chg_mask = SVM_VM_CR_VALID_MASK;

	if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
		chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);

	svm->nested.vm_cr_msr &= ~chg_mask;
	svm->nested.vm_cr_msr |= (data & chg_mask);

	svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;

	/* check for svm_disable while efer.svme is set */
	if (svm_dis && (vcpu->arch.efer & EFER_SVME))
		return 1;

	return 0;
}

static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	u32 ecx = msr->index;
	u64 data = msr->data;
	switch (ecx) {
	case MSR_IA32_TSC:
		kvm_write_tsc(vcpu, msr);
		break;
	case MSR_STAR:
		svm->vmcb->save.star = data;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		svm->vmcb->save.lstar = data;
		break;
	case MSR_CSTAR:
		svm->vmcb->save.cstar = data;
		break;
	case MSR_KERNEL_GS_BASE:
		svm->vmcb->save.kernel_gs_base = data;
		break;
	case MSR_SYSCALL_MASK:
		svm->vmcb->save.sfmask = data;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		svm->vmcb->save.sysenter_cs = data;
		break;
	case MSR_IA32_SYSENTER_EIP:
		svm->sysenter_eip = data;
		svm->vmcb->save.sysenter_eip = data;
		break;
	case MSR_IA32_SYSENTER_ESP:
		svm->sysenter_esp = data;
		svm->vmcb->save.sysenter_esp = data;
		break;
	case MSR_IA32_DEBUGCTLMSR:
		if (!boot_cpu_has(X86_FEATURE_LBRV)) {
			vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
				    __func__, data);
			break;
		}
		if (data & DEBUGCTL_RESERVED_BITS)
			return 1;

		svm->vmcb->save.dbgctl = data;
		mark_dirty(svm->vmcb, VMCB_LBR);
		if (data & (1ULL<<0))
			svm_enable_lbrv(svm);
		else
			svm_disable_lbrv(svm);
		break;
	case MSR_VM_HSAVE_PA:
		svm->nested.hsave_msr = data;
		break;
	case MSR_VM_CR:
		return svm_set_vm_cr(vcpu, data);
	case MSR_VM_IGNNE:
		vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
		break;
	default:
		return kvm_set_msr_common(vcpu, msr);
	}
	return 0;
}

static int wrmsr_interception(struct vcpu_svm *svm)
{
	struct msr_data msr;
	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
		| ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);

	msr.data = data;
	msr.index = ecx;
	msr.host_initiated = false;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
	if (kvm_set_msr(&svm->vcpu, &msr)) {
		trace_kvm_msr_write_ex(ecx, data);
		kvm_inject_gp(&svm->vcpu, 0);
	} else {
		trace_kvm_msr_write(ecx, data);
		skip_emulated_instruction(&svm->vcpu);
	}
	return 1;
}

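/*
 * Note (from the dispatch below): RDMSR and WRMSR share the SVM_EXIT_MSR
 * exit code; exit_info_1 is 1 for a write and 0 for a read, which is all
 * the dispatcher needs to route the exit.
 */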
static int msr_interception(struct vcpu_svm *svm)
{
	if (svm->vmcb->control.exit_info_1)
		return wrmsr_interception(svm);
	else
		return rdmsr_interception(svm);
}

static int interrupt_window_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;

	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
	svm_clear_vintr(svm);
	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
	mark_dirty(svm->vmcb, VMCB_INTR);
	++svm->vcpu.stat.irq_window_exits;
	/*
	 * If the user space waits to inject interrupts, exit as soon as
	 * possible
	 */
	if (!irqchip_in_kernel(svm->vcpu.kvm) &&
	    kvm_run->request_interrupt_window &&
	    !kvm_cpu_has_interrupt(&svm->vcpu)) {
		kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		return 0;
	}

	return 1;
}

static int pause_interception(struct vcpu_svm *svm)
{
	kvm_vcpu_on_spin(&(svm->vcpu));
	return 1;
}

static int nop_interception(struct vcpu_svm *svm)
{
	skip_emulated_instruction(&(svm->vcpu));
	return 1;
}

static int monitor_interception(struct vcpu_svm *svm)
{
	printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
	return nop_interception(svm);
}

static int mwait_interception(struct vcpu_svm *svm)
{
	printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
	return nop_interception(svm);
}

static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
	[SVM_EXIT_READ_CR0]			= cr_interception,
	[SVM_EXIT_READ_CR3]			= cr_interception,
	[SVM_EXIT_READ_CR4]			= cr_interception,
	[SVM_EXIT_READ_CR8]			= cr_interception,
	[SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception,
	[SVM_EXIT_WRITE_CR0]			= cr_interception,
	[SVM_EXIT_WRITE_CR3]			= cr_interception,
	[SVM_EXIT_WRITE_CR4]			= cr_interception,
	[SVM_EXIT_WRITE_CR8]			= cr8_write_interception,
	[SVM_EXIT_READ_DR0]			= dr_interception,
	[SVM_EXIT_READ_DR1]			= dr_interception,
	[SVM_EXIT_READ_DR2]			= dr_interception,
	[SVM_EXIT_READ_DR3]			= dr_interception,
	[SVM_EXIT_READ_DR4]			= dr_interception,
	[SVM_EXIT_READ_DR5]			= dr_interception,
	[SVM_EXIT_READ_DR6]			= dr_interception,
	[SVM_EXIT_READ_DR7]			= dr_interception,
	[SVM_EXIT_WRITE_DR0]			= dr_interception,
	[SVM_EXIT_WRITE_DR1]			= dr_interception,
	[SVM_EXIT_WRITE_DR2]			= dr_interception,
	[SVM_EXIT_WRITE_DR3]			= dr_interception,
	[SVM_EXIT_WRITE_DR4]			= dr_interception,
	[SVM_EXIT_WRITE_DR5]			= dr_interception,
	[SVM_EXIT_WRITE_DR6]			= dr_interception,
	[SVM_EXIT_WRITE_DR7]			= dr_interception,
	[SVM_EXIT_EXCP_BASE + DB_VECTOR]	= db_interception,
	[SVM_EXIT_EXCP_BASE + BP_VECTOR]	= bp_interception,
	[SVM_EXIT_EXCP_BASE + UD_VECTOR]	= ud_interception,
	[SVM_EXIT_EXCP_BASE + PF_VECTOR]	= pf_interception,
	[SVM_EXIT_EXCP_BASE + NM_VECTOR]	= nm_interception,
	[SVM_EXIT_EXCP_BASE + MC_VECTOR]	= mc_interception,
	[SVM_EXIT_INTR]				= intr_interception,
	[SVM_EXIT_NMI]				= nmi_interception,
	[SVM_EXIT_SMI]				= nop_on_interception,
	[SVM_EXIT_INIT]				= nop_on_interception,
	[SVM_EXIT_VINTR]			= interrupt_window_interception,
	[SVM_EXIT_RDPMC]			= rdpmc_interception,
	[SVM_EXIT_CPUID]			= cpuid_interception,
	[SVM_EXIT_IRET]				= iret_interception,
	[SVM_EXIT_INVD]				= emulate_on_interception,
	[SVM_EXIT_PAUSE]			= pause_interception,
	[SVM_EXIT_HLT]				= halt_interception,
	[SVM_EXIT_INVLPG]			= invlpg_interception,
	[SVM_EXIT_INVLPGA]			= invlpga_interception,
	[SVM_EXIT_IOIO]				= io_interception,
	[SVM_EXIT_MSR]				= msr_interception,
	[SVM_EXIT_TASK_SWITCH]			= task_switch_interception,
	[SVM_EXIT_SHUTDOWN]			= shutdown_interception,
	[SVM_EXIT_VMRUN]			= vmrun_interception,
	[SVM_EXIT_VMMCALL]			= vmmcall_interception,
	[SVM_EXIT_VMLOAD]			= vmload_interception,
	[SVM_EXIT_VMSAVE]			= vmsave_interception,
	[SVM_EXIT_STGI]				= stgi_interception,
	[SVM_EXIT_CLGI]				= clgi_interception,
	[SVM_EXIT_SKINIT]			= skinit_interception,
	[SVM_EXIT_WBINVD]			= emulate_on_interception,
	[SVM_EXIT_MONITOR]			= monitor_interception,
	[SVM_EXIT_MWAIT]			= mwait_interception,
	[SVM_EXIT_XSETBV]			= xsetbv_interception,
	[SVM_EXIT_NPF]				= pf_interception,
};

3386
static void dump_vmcb(struct kvm_vcpu *vcpu)
3387 3388 3389 3390 3391 3392
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct vmcb_save_area *save = &svm->vmcb->save;

	pr_err("VMCB Control Area:\n");
3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418
	pr_err("%-20s%04x\n", "cr_read:", control->intercept_cr & 0xffff);
	pr_err("%-20s%04x\n", "cr_write:", control->intercept_cr >> 16);
	pr_err("%-20s%04x\n", "dr_read:", control->intercept_dr & 0xffff);
	pr_err("%-20s%04x\n", "dr_write:", control->intercept_dr >> 16);
	pr_err("%-20s%08x\n", "exceptions:", control->intercept_exceptions);
	pr_err("%-20s%016llx\n", "intercepts:", control->intercept);
	pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
	pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
	pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
	pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
	pr_err("%-20s%d\n", "asid:", control->asid);
	pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
	pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
	pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
	pr_err("%-20s%08x\n", "int_state:", control->int_state);
	pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
	pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
	pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
	pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
	pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
	pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
	pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
	pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
	pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
	pr_err("%-20s%lld\n", "lbr_ctl:", control->lbr_ctl);
	pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
3419
	pr_err("VMCB State Save Area:\n");
3420 3421 3422 3423 3424 3425 3426 3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450 3451 3452 3453 3454 3455 3456 3457 3458 3459
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "es:",
	       save->es.selector, save->es.attrib,
	       save->es.limit, save->es.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "cs:",
	       save->cs.selector, save->cs.attrib,
	       save->cs.limit, save->cs.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "ss:",
	       save->ss.selector, save->ss.attrib,
	       save->ss.limit, save->ss.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "ds:",
	       save->ds.selector, save->ds.attrib,
	       save->ds.limit, save->ds.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "fs:",
	       save->fs.selector, save->fs.attrib,
	       save->fs.limit, save->fs.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "gs:",
	       save->gs.selector, save->gs.attrib,
	       save->gs.limit, save->gs.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "gdtr:",
	       save->gdtr.selector, save->gdtr.attrib,
	       save->gdtr.limit, save->gdtr.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "ldtr:",
	       save->ldtr.selector, save->ldtr.attrib,
	       save->ldtr.limit, save->ldtr.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "idtr:",
	       save->idtr.selector, save->idtr.attrib,
	       save->idtr.limit, save->idtr.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "tr:",
	       save->tr.selector, save->tr.attrib,
	       save->tr.limit, save->tr.base);
3460 3461
	pr_err("cpl:            %d                efer:         %016llx\n",
		save->cpl, save->efer);
3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "cr0:", save->cr0, "cr2:", save->cr2);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "cr3:", save->cr3, "cr4:", save->cr4);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "dr6:", save->dr6, "dr7:", save->dr7);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "rip:", save->rip, "rflags:", save->rflags);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "rsp:", save->rsp, "rax:", save->rax);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "star:", save->star, "lstar:", save->lstar);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "cstar:", save->cstar, "sfmask:", save->sfmask);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "kernel_gs_base:", save->kernel_gs_base,
	       "sysenter_cs:", save->sysenter_cs);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "sysenter_esp:", save->sysenter_esp,
	       "sysenter_eip:", save->sysenter_eip);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "br_from:", save->br_from, "br_to:", save->br_to);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "excp_from:", save->last_excp_from,
	       "excp_to:", save->last_excp_to);
}

static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
{
	struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;

	*info1 = control->exit_info_1;
	*info2 = control->exit_info_2;
}

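/*
 * Top-level #VMEXIT dispatcher: sync register state cached in the VMCB
 * back into the vcpu, let a nested hypervisor claim the exit first, and
 * finally hand the exit code to the matching svm_exit_handlers[] entry.
 */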
static int handle_exit(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct kvm_run *kvm_run = vcpu->run;
	u32 exit_code = svm->vmcb->control.exit_code;

	if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
		vcpu->arch.cr0 = svm->vmcb->save.cr0;
	if (npt_enabled)
		vcpu->arch.cr3 = svm->vmcb->save.cr3;

	if (unlikely(svm->nested.exit_required)) {
		nested_svm_vmexit(svm);
		svm->nested.exit_required = false;

		return 1;
	}

	if (is_guest_mode(vcpu)) {
		int vmexit;

		trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
					svm->vmcb->control.exit_info_1,
					svm->vmcb->control.exit_info_2,
					svm->vmcb->control.exit_int_info,
					svm->vmcb->control.exit_int_info_err,
					KVM_ISA_SVM);

		vmexit = nested_svm_exit_special(svm);

		if (vmexit == NESTED_EXIT_CONTINUE)
			vmexit = nested_svm_exit_handled(svm);

		if (vmexit == NESTED_EXIT_DONE)
			return 1;
	}

	svm_complete_interrupts(svm);

	if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		kvm_run->fail_entry.hardware_entry_failure_reason
			= svm->vmcb->control.exit_code;
		pr_err("KVM: FAILED VMRUN WITH VMCB:\n");
		dump_vmcb(vcpu);
		return 0;
	}

	if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
	    exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
	    exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
	    exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
		printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
		       "exit_code 0x%x\n",
		       __func__, svm->vmcb->control.exit_int_info,
		       exit_code);

	if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
	    || !svm_exit_handlers[exit_code]) {
		WARN_ONCE(1, "svm: unexpected exit reason 0x%x\n", exit_code);
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	return svm_exit_handlers[exit_code](svm);
}

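/*
 * ltr only accepts an available TSS descriptor, but the descriptor in
 * the GDT is still marked busy from its last load, so reset the type
 * to 9 (available 32/64-bit TSS) before reloading TR.
 */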
static void reload_tss(struct kvm_vcpu *vcpu)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
	sd->tss_desc->type = 9; /* available 32/64-bit TSS */
	load_TR_desc();
}

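/*
 * Called just before VMRUN: if this vcpu's ASID belongs to an older
 * generation on the current physical CPU, allocate a fresh one so that
 * stale TLB entries tagged with a recycled ASID cannot be reused.
 */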
static void pre_svm_run(struct vcpu_svm *svm)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);

	/* FIXME: handle wraparound of asid_generation */
	if (svm->asid_generation != sd->asid_generation)
		new_asid(svm, sd);
}

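/*
 * NMIs are injected through the VMCB EVENTINJ field.  HF_NMI_MASK
 * blocks further NMIs until the guest executes IRET, so the IRET
 * intercept is armed here to detect the end of the NMI handler.
 */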
static void svm_inject_nmi(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
	vcpu->arch.hflags |= HF_NMI_MASK;
	set_intercept(svm, INTERCEPT_IRET);
	++vcpu->stat.nmi_injections;
}

static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
{
	struct vmcb_control_area *control;

	control = &svm->vmcb->control;
	control->int_vector = irq;
	control->int_ctl &= ~V_INTR_PRIO_MASK;
	control->int_ctl |= V_IRQ_MASK |
		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
	mark_dirty(svm->vmcb, VMCB_INTR);
}

static void svm_set_irq(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	BUG_ON(!(gif_set(svm)));

	trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
	++vcpu->stat.irq_injections;

	svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
		SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
}

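/*
 * Intercept CR8 writes only while the highest pending interrupt (irr)
 * is blocked by the current TPR; the resulting exit lets KVM reevaluate
 * injection as soon as the guest lowers its TPR.
 */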
static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
		return;

	clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);

	if (irr == -1)
		return;

	if (tpr >= irr)
		set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
}

static void svm_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
{
	return;
}

static int svm_vm_has_apicv(struct kvm *kvm)
{
	return 0;
}

static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
{
	return;
}

static void svm_hwapic_isr_update(struct kvm *kvm, int isr)
{
	return;
}

static void svm_sync_pir_to_irr(struct kvm_vcpu *vcpu)
{
	return;
}

static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;
	int ret;
	ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
	      !(svm->vcpu.arch.hflags & HF_NMI_MASK);
	ret = ret && gif_set(svm) && nested_svm_nmi(svm);

	return ret;
}

static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
}

static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (masked) {
		svm->vcpu.arch.hflags |= HF_NMI_MASK;
		set_intercept(svm, INTERCEPT_IRET);
	} else {
		svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
		clr_intercept(svm, INTERCEPT_IRET);
	}
}

static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;
	int ret;

	if (!gif_set(svm) ||
	     (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
		return 0;

	ret = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF);

	if (is_guest_mode(vcpu))
		return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);

	return ret;
}

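/*
 * SVM has no dedicated "interrupt window" exit; instead a virtual
 * interrupt (V_IRQ) is programmed and the VINTR intercept fires as
 * soon as the guest is able to take interrupts again.
 */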
static void enable_irq_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
	 * 1, because that's a separate STGI/VMRUN intercept.  The next time we
	 * get that intercept, this function will be called again though and
	 * we'll get the vintr intercept.
	 */
	if (gif_set(svm) && nested_svm_intr(svm)) {
		svm_set_vintr(svm);
		svm_inject_irq(svm, 0x0);
	}
}

static void enable_nmi_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
	    == HF_NMI_MASK)
		return; /* IRET will cause a vm exit */

	/*
	 * Something prevents NMI from being injected. Single step over the
	 * possible problem (IRET or exception injection or interrupt shadow).
	 */
	svm->nmi_singlestep = true;
	svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
	update_db_bp_intercept(vcpu);
}

static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
	return 0;
}

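/*
 * Prefer a flush-by-ASID when the CPU supports it; otherwise invalidate
 * this vcpu's ASID generation so that pre_svm_run() assigns a new ASID,
 * and with it an empty set of TLB entries, on the next VMRUN.
 */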
static void svm_flush_tlb(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
	else
		svm->asid_generation--;
}

static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
{
}

static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
		return;

	if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
		int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
		kvm_set_cr8(vcpu, cr8);
	}
}

static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr8;

	if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
		return;

	cr8 = kvm_get_cr8(vcpu);
	svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
	svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
}

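/*
 * EXITINTINFO describes an event whose delivery was cut short by the
 * #VMEXIT.  Requeue that event so it is reinjected on the next entry;
 * software exceptions are instead re-executed from the original RIP.
 */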
static void svm_complete_interrupts(struct vcpu_svm *svm)
{
	u8 vector;
	int type;
	u32 exitintinfo = svm->vmcb->control.exit_int_info;
	unsigned int3_injected = svm->int3_injected;

	svm->int3_injected = 0;

	/*
	 * If we've made progress since setting HF_IRET_MASK, we've
	 * executed an IRET and can allow NMI injection.
	 */
	if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
	    && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) {
		svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
		kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
	}

	svm->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	if (!(exitintinfo & SVM_EXITINTINFO_VALID))
		return;

	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);

	vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
	type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;

	switch (type) {
	case SVM_EXITINTINFO_TYPE_NMI:
		svm->vcpu.arch.nmi_injected = true;
		break;
	case SVM_EXITINTINFO_TYPE_EXEPT:
		/*
		 * In case of software exceptions, do not reinject the vector,
		 * but re-execute the instruction instead. Rewind RIP first
		 * if we emulated INT3 before.
		 */
		if (kvm_exception_is_soft(vector)) {
			if (vector == BP_VECTOR && int3_injected &&
			    kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
				kvm_rip_write(&svm->vcpu,
					      kvm_rip_read(&svm->vcpu) -
					      int3_injected);
			break;
		}
		if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
			u32 err = svm->vmcb->control.exit_int_info_err;
			kvm_requeue_exception_e(&svm->vcpu, vector, err);

		} else
			kvm_requeue_exception(&svm->vcpu, vector);
		break;
	case SVM_EXITINTINFO_TYPE_INTR:
		kvm_queue_interrupt(&svm->vcpu, vector, false);
		break;
	default:
		break;
	}
}

static void svm_cancel_injection(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;

	control->exit_int_info = control->event_inj;
	control->exit_int_info_err = control->event_inj_err;
	control->event_inj = 0;
	svm_complete_interrupts(svm);
}

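/*
 * The world switch itself: GIF is cleared so that physical interrupts
 * and NMIs stay blocked while host IRQs are formally enabled, then
 * VMLOAD/VMRUN/VMSAVE enter the guest.  On exit, guest state is pulled
 * back out of the VMCB and host segment and TSS state is restored.
 */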
static void svm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];

	/*
	 * A vmexit emulation is required before the vcpu can be executed
	 * again.
	 */
	if (unlikely(svm->nested.exit_required))
		return;

	pre_svm_run(svm);

	sync_lapic_to_cr8(vcpu);

	svm->vmcb->save.cr2 = vcpu->arch.cr2;

	clgi();

	local_irq_enable();

	asm volatile (
		"push %%" _ASM_BP "; \n\t"
		"mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
		"mov %c[rcx](%[svm]), %%" _ASM_CX " \n\t"
		"mov %c[rdx](%[svm]), %%" _ASM_DX " \n\t"
		"mov %c[rsi](%[svm]), %%" _ASM_SI " \n\t"
		"mov %c[rdi](%[svm]), %%" _ASM_DI " \n\t"
		"mov %c[rbp](%[svm]), %%" _ASM_BP " \n\t"
#ifdef CONFIG_X86_64
		"mov %c[r8](%[svm]),  %%r8  \n\t"
		"mov %c[r9](%[svm]),  %%r9  \n\t"
		"mov %c[r10](%[svm]), %%r10 \n\t"
		"mov %c[r11](%[svm]), %%r11 \n\t"
		"mov %c[r12](%[svm]), %%r12 \n\t"
		"mov %c[r13](%[svm]), %%r13 \n\t"
		"mov %c[r14](%[svm]), %%r14 \n\t"
		"mov %c[r15](%[svm]), %%r15 \n\t"
#endif

		/* Enter guest mode */
		"push %%" _ASM_AX " \n\t"
		"mov %c[vmcb](%[svm]), %%" _ASM_AX " \n\t"
		__ex(SVM_VMLOAD) "\n\t"
		__ex(SVM_VMRUN) "\n\t"
		__ex(SVM_VMSAVE) "\n\t"
		"pop %%" _ASM_AX " \n\t"

		/* Save guest registers, load host registers */
		"mov %%" _ASM_BX ", %c[rbx](%[svm]) \n\t"
		"mov %%" _ASM_CX ", %c[rcx](%[svm]) \n\t"
		"mov %%" _ASM_DX ", %c[rdx](%[svm]) \n\t"
		"mov %%" _ASM_SI ", %c[rsi](%[svm]) \n\t"
		"mov %%" _ASM_DI ", %c[rdi](%[svm]) \n\t"
		"mov %%" _ASM_BP ", %c[rbp](%[svm]) \n\t"
#ifdef CONFIG_X86_64
		"mov %%r8,  %c[r8](%[svm]) \n\t"
		"mov %%r9,  %c[r9](%[svm]) \n\t"
		"mov %%r10, %c[r10](%[svm]) \n\t"
		"mov %%r11, %c[r11](%[svm]) \n\t"
		"mov %%r12, %c[r12](%[svm]) \n\t"
		"mov %%r13, %c[r13](%[svm]) \n\t"
		"mov %%r14, %c[r14](%[svm]) \n\t"
		"mov %%r15, %c[r15](%[svm]) \n\t"
#endif
		"pop %%" _ASM_BP
		:
		: [svm]"a"(svm),
		  [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
		  [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
		  [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
		  [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
		  [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
		  [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
		  [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
#ifdef CONFIG_X86_64
		  , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
		  [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
		  [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
		  [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
		  [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
		  [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
		  [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
		  [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
#endif
		: "cc", "memory"
#ifdef CONFIG_X86_64
		, "rbx", "rcx", "rdx", "rsi", "rdi"
		, "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
#else
		, "ebx", "ecx", "edx", "esi", "edi"
#endif
		);

#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
#else
	loadsegment(fs, svm->host.fs);
#ifndef CONFIG_X86_32_LAZY_GS
	loadsegment(gs, svm->host.gs);
#endif
#endif

	reload_tss(vcpu);

	local_irq_disable();

	vcpu->arch.cr2 = svm->vmcb->save.cr2;
	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;

	trace_kvm_exit(svm->vmcb->control.exit_code, vcpu, KVM_ISA_SVM);

	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
		kvm_before_handle_nmi(&svm->vcpu);

	stgi();

	/* Any pending NMI will happen here */

	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
		kvm_after_handle_nmi(&svm->vcpu);

	sync_cr8_to_lapic(vcpu);

	svm->next_rip = 0;

	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;

	/* if exit due to PF check for async PF */
	if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
		svm->apf_reason = kvm_read_and_reset_pf_reason();

	if (npt_enabled) {
		vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
		vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
	}

	/*
	 * We need to handle MC intercepts here before the vcpu has a chance to
	 * change the physical cpu
	 */
	if (unlikely(svm->vmcb->control.exit_code ==
		     SVM_EXIT_EXCP_BASE + MC_VECTOR))
		svm_handle_mce(svm);

	mark_all_clean(svm->vmcb);
}

static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.cr3 = root;
	mark_dirty(svm->vmcb, VMCB_CR);
	svm_flush_tlb(vcpu);
}

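/*
 * With nested paging the hardware uses two roots: nested_cr3 points at
 * KVM's NPT tables while save.cr3 stays under guest control and is only
 * kept in sync here for the benefit of live migration.
 */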
static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.nested_cr3 = root;
	mark_dirty(svm->vmcb, VMCB_NPT);

	/* Also sync guest cr3 here in case we live migrate */
	svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
	mark_dirty(svm->vmcb, VMCB_CR);

	svm_flush_tlb(vcpu);
}

static int is_disabled(void)
{
	u64 vm_cr;

	rdmsrl(MSR_VM_CR, vm_cr);
	if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
		return 1;

	return 0;
}

static void
svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
	/*
	 * Patch in the VMMCALL instruction:
	 */
	hypercall[0] = 0x0f;
	hypercall[1] = 0x01;
	hypercall[2] = 0xd9;
}

static void svm_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}

static bool svm_cpu_has_accelerated_tpr(void)
{
	return false;
}

static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
{
	return 0;
}

static void svm_cpuid_update(struct kvm_vcpu *vcpu)
{
}

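/*
 * Adjust the CPUID bits reported for the guest: expose the SVM bit in
 * leaf 0x80000001 when nesting is enabled, and describe the virtual SVM
 * feature set through leaf 0x8000000A.
 */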
static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
{
	switch (func) {
	case 0x80000001:
		if (nested)
			entry->ecx |= (1 << 2); /* Set SVM bit */
		break;
	case 0x8000000A:
		entry->eax = 1; /* SVM revision 1 */
	entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
			   ASID emulation to nested SVM */
		entry->ecx = 0; /* Reserved */
		entry->edx = 0; /* Per default do not support any
				   additional features */

		/* Support next_rip if host supports it */
		if (boot_cpu_has(X86_FEATURE_NRIPS))
			entry->edx |= SVM_FEATURE_NRIP;

		/* Support NPT for the guest if enabled */
		if (npt_enabled)
			entry->edx |= SVM_FEATURE_NPT;

		break;
	}
}

static int svm_get_lpage_level(void)
{
	return PT_PDPE_LEVEL;
}

static bool svm_rdtscp_supported(void)
{
	return false;
}

static bool svm_invpcid_supported(void)
{
	return false;
}

static bool svm_mpx_supported(void)
{
	return false;
}

static bool svm_has_wbinvd_exit(void)
{
	return true;
}

static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	set_exception_intercept(svm, NM_VECTOR);
	update_cr0_intercept(svm);
}

#define PRE_EX(exit)  { .exit_code = (exit), \
			.stage = X86_ICPT_PRE_EXCEPT, }
#define POST_EX(exit) { .exit_code = (exit), \
			.stage = X86_ICPT_POST_EXCEPT, }
#define POST_MEM(exit) { .exit_code = (exit), \
			.stage = X86_ICPT_POST_MEMACCESS, }

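/*
 * Map emulator intercept checks (x86_intercept_*) to the SVM exit code
 * a nested hypervisor would observe, together with the emulation stage
 * at which each check applies.  svm_check_intercept() uses this to
 * decide whether an emulated instruction must trigger a nested #VMEXIT.
 */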
static const struct __x86_intercept {
	u32 exit_code;
	enum x86_intercept_stage stage;
} x86_intercept_map[] = {
	[x86_intercept_cr_read]		= POST_EX(SVM_EXIT_READ_CR0),
	[x86_intercept_cr_write]	= POST_EX(SVM_EXIT_WRITE_CR0),
	[x86_intercept_clts]		= POST_EX(SVM_EXIT_WRITE_CR0),
	[x86_intercept_lmsw]		= POST_EX(SVM_EXIT_WRITE_CR0),
	[x86_intercept_smsw]		= POST_EX(SVM_EXIT_READ_CR0),
	[x86_intercept_dr_read]		= POST_EX(SVM_EXIT_READ_DR0),
	[x86_intercept_dr_write]	= POST_EX(SVM_EXIT_WRITE_DR0),
	[x86_intercept_sldt]		= POST_EX(SVM_EXIT_LDTR_READ),
	[x86_intercept_str]		= POST_EX(SVM_EXIT_TR_READ),
	[x86_intercept_lldt]		= POST_EX(SVM_EXIT_LDTR_WRITE),
	[x86_intercept_ltr]		= POST_EX(SVM_EXIT_TR_WRITE),
	[x86_intercept_sgdt]		= POST_EX(SVM_EXIT_GDTR_READ),
	[x86_intercept_sidt]		= POST_EX(SVM_EXIT_IDTR_READ),
	[x86_intercept_lgdt]		= POST_EX(SVM_EXIT_GDTR_WRITE),
	[x86_intercept_lidt]		= POST_EX(SVM_EXIT_IDTR_WRITE),
	[x86_intercept_vmrun]		= POST_EX(SVM_EXIT_VMRUN),
	[x86_intercept_vmmcall]		= POST_EX(SVM_EXIT_VMMCALL),
	[x86_intercept_vmload]		= POST_EX(SVM_EXIT_VMLOAD),
	[x86_intercept_vmsave]		= POST_EX(SVM_EXIT_VMSAVE),
	[x86_intercept_stgi]		= POST_EX(SVM_EXIT_STGI),
	[x86_intercept_clgi]		= POST_EX(SVM_EXIT_CLGI),
	[x86_intercept_skinit]		= POST_EX(SVM_EXIT_SKINIT),
	[x86_intercept_invlpga]		= POST_EX(SVM_EXIT_INVLPGA),
	[x86_intercept_rdtscp]		= POST_EX(SVM_EXIT_RDTSCP),
	[x86_intercept_monitor]		= POST_MEM(SVM_EXIT_MONITOR),
	[x86_intercept_mwait]		= POST_EX(SVM_EXIT_MWAIT),
	[x86_intercept_invlpg]		= POST_EX(SVM_EXIT_INVLPG),
	[x86_intercept_invd]		= POST_EX(SVM_EXIT_INVD),
	[x86_intercept_wbinvd]		= POST_EX(SVM_EXIT_WBINVD),
	[x86_intercept_wrmsr]		= POST_EX(SVM_EXIT_MSR),
	[x86_intercept_rdtsc]		= POST_EX(SVM_EXIT_RDTSC),
	[x86_intercept_rdmsr]		= POST_EX(SVM_EXIT_MSR),
	[x86_intercept_rdpmc]		= POST_EX(SVM_EXIT_RDPMC),
	[x86_intercept_cpuid]		= PRE_EX(SVM_EXIT_CPUID),
	[x86_intercept_rsm]		= PRE_EX(SVM_EXIT_RSM),
	[x86_intercept_pause]		= PRE_EX(SVM_EXIT_PAUSE),
	[x86_intercept_pushf]		= PRE_EX(SVM_EXIT_PUSHF),
	[x86_intercept_popf]		= PRE_EX(SVM_EXIT_POPF),
	[x86_intercept_intn]		= PRE_EX(SVM_EXIT_SWINT),
	[x86_intercept_iret]		= PRE_EX(SVM_EXIT_IRET),
	[x86_intercept_icebp]		= PRE_EX(SVM_EXIT_ICEBP),
	[x86_intercept_hlt]		= POST_EX(SVM_EXIT_HLT),
	[x86_intercept_in]		= POST_EX(SVM_EXIT_IOIO),
	[x86_intercept_ins]		= POST_EX(SVM_EXIT_IOIO),
	[x86_intercept_out]		= POST_EX(SVM_EXIT_IOIO),
	[x86_intercept_outs]		= POST_EX(SVM_EXIT_IOIO),
};

#undef PRE_EX
#undef POST_EX
#undef POST_MEM

static int svm_check_intercept(struct kvm_vcpu *vcpu,
			       struct x86_instruction_info *info,
			       enum x86_intercept_stage stage)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int vmexit, ret = X86EMUL_CONTINUE;
	struct __x86_intercept icpt_info;
	struct vmcb *vmcb = svm->vmcb;

	if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
		goto out;

	icpt_info = x86_intercept_map[info->intercept];

	if (stage != icpt_info.stage)
		goto out;

	switch (icpt_info.exit_code) {
	case SVM_EXIT_READ_CR0:
		if (info->intercept == x86_intercept_cr_read)
			icpt_info.exit_code += info->modrm_reg;
		break;
	case SVM_EXIT_WRITE_CR0: {
		unsigned long cr0, val;
		u64 intercept;

		if (info->intercept == x86_intercept_cr_write)
			icpt_info.exit_code += info->modrm_reg;

		if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
		    info->intercept == x86_intercept_clts)
			break;

		intercept = svm->nested.intercept;

		if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))
			break;

		cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
		val = info->src_val  & ~SVM_CR0_SELECTIVE_MASK;

		if (info->intercept == x86_intercept_lmsw) {
			cr0 &= 0xfUL;
			val &= 0xfUL;
			/* lmsw can't clear PE - catch this here */
			if (cr0 & X86_CR0_PE)
				val |= X86_CR0_PE;
		}

		if (cr0 ^ val)
			icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;

		break;
	}
	case SVM_EXIT_READ_DR0:
	case SVM_EXIT_WRITE_DR0:
		icpt_info.exit_code += info->modrm_reg;
		break;
	case SVM_EXIT_MSR:
		if (info->intercept == x86_intercept_wrmsr)
			vmcb->control.exit_info_1 = 1;
		else
			vmcb->control.exit_info_1 = 0;
		break;
	case SVM_EXIT_PAUSE:
		/*
		 * We get this for NOP only, but pause
		 * is "rep nop" (F3 90), so check the rep prefix here
		 */
		if (info->rep_prefix != REPE_PREFIX)
			goto out;
		break;
	case SVM_EXIT_IOIO: {
		u64 exit_info;
		u32 bytes;

		if (info->intercept == x86_intercept_in ||
		    info->intercept == x86_intercept_ins) {
			exit_info = ((info->src_val & 0xffff) << 16) |
				SVM_IOIO_TYPE_MASK;
			bytes = info->dst_bytes;
		} else {
			exit_info = (info->dst_val & 0xffff) << 16;
			bytes = info->src_bytes;
		}

		if (info->intercept == x86_intercept_outs ||
		    info->intercept == x86_intercept_ins)
			exit_info |= SVM_IOIO_STR_MASK;

		if (info->rep_prefix)
			exit_info |= SVM_IOIO_REP_MASK;

		bytes = min(bytes, 4u);

		exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;

		exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);

		vmcb->control.exit_info_1 = exit_info;
		vmcb->control.exit_info_2 = info->next_rip;

		break;
	}
	default:
		break;
	}

	vmcb->control.next_rip  = info->next_rip;
	vmcb->control.exit_code = icpt_info.exit_code;
	vmexit = nested_svm_exit_handled(svm);

	ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
					   : X86EMUL_CONTINUE;

out:
	return ret;
}

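/*
 * Local interrupts were disabled around the guest entry; enabling them
 * here lets the host service the physical interrupt that caused the
 * exit.
 */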
static void svm_handle_external_intr(struct kvm_vcpu *vcpu)
{
	local_irq_enable();
}

static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
}

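/*
 * Dispatch table that wires the SVM backend into the generic KVM x86
 * code; every arch hook goes through these callbacks.
 */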
static struct kvm_x86_ops svm_x86_ops = {
	.cpu_has_kvm_support = has_svm,
	.disabled_by_bios = is_disabled,
	.hardware_setup = svm_hardware_setup,
	.hardware_unsetup = svm_hardware_unsetup,
	.check_processor_compatibility = svm_check_processor_compat,
	.hardware_enable = svm_hardware_enable,
	.hardware_disable = svm_hardware_disable,
	.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,

	.vcpu_create = svm_create_vcpu,
	.vcpu_free = svm_free_vcpu,
	.vcpu_reset = svm_vcpu_reset,

	.prepare_guest_switch = svm_prepare_guest_switch,
	.vcpu_load = svm_vcpu_load,
	.vcpu_put = svm_vcpu_put,

	.update_db_bp_intercept = update_db_bp_intercept,
	.get_msr = svm_get_msr,
	.set_msr = svm_set_msr,
	.get_segment_base = svm_get_segment_base,
	.get_segment = svm_get_segment,
	.set_segment = svm_set_segment,
	.get_cpl = svm_get_cpl,
	.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
	.decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
	.decache_cr3 = svm_decache_cr3,
	.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
	.set_cr0 = svm_set_cr0,
	.set_cr3 = svm_set_cr3,
	.set_cr4 = svm_set_cr4,
	.set_efer = svm_set_efer,
	.get_idt = svm_get_idt,
	.set_idt = svm_set_idt,
	.get_gdt = svm_get_gdt,
	.set_gdt = svm_set_gdt,
	.get_dr6 = svm_get_dr6,
	.set_dr6 = svm_set_dr6,
	.set_dr7 = svm_set_dr7,
	.sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
	.cache_reg = svm_cache_reg,
	.get_rflags = svm_get_rflags,
	.set_rflags = svm_set_rflags,
	.fpu_deactivate = svm_fpu_deactivate,

	.tlb_flush = svm_flush_tlb,

	.run = svm_vcpu_run,
	.handle_exit = handle_exit,
	.skip_emulated_instruction = skip_emulated_instruction,
	.set_interrupt_shadow = svm_set_interrupt_shadow,
	.get_interrupt_shadow = svm_get_interrupt_shadow,
	.patch_hypercall = svm_patch_hypercall,
	.set_irq = svm_set_irq,
	.set_nmi = svm_inject_nmi,
	.queue_exception = svm_queue_exception,
	.cancel_injection = svm_cancel_injection,
	.interrupt_allowed = svm_interrupt_allowed,
	.nmi_allowed = svm_nmi_allowed,
	.get_nmi_mask = svm_get_nmi_mask,
	.set_nmi_mask = svm_set_nmi_mask,
	.enable_nmi_window = enable_nmi_window,
	.enable_irq_window = enable_irq_window,
	.update_cr8_intercept = update_cr8_intercept,
	.set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode,
	.vm_has_apicv = svm_vm_has_apicv,
	.load_eoi_exitmap = svm_load_eoi_exitmap,
	.hwapic_isr_update = svm_hwapic_isr_update,
	.sync_pir_to_irr = svm_sync_pir_to_irr,

	.set_tss_addr = svm_set_tss_addr,
	.get_tdp_level = get_npt_level,
	.get_mt_mask = svm_get_mt_mask,

	.get_exit_info = svm_get_exit_info,

	.get_lpage_level = svm_get_lpage_level,

	.cpuid_update = svm_cpuid_update,

	.rdtscp_supported = svm_rdtscp_supported,
	.invpcid_supported = svm_invpcid_supported,
	.mpx_supported = svm_mpx_supported,

	.set_supported_cpuid = svm_set_supported_cpuid,

	.has_wbinvd_exit = svm_has_wbinvd_exit,

	.set_tsc_khz = svm_set_tsc_khz,
	.read_tsc_offset = svm_read_tsc_offset,
	.write_tsc_offset = svm_write_tsc_offset,
	.adjust_tsc_offset = svm_adjust_tsc_offset,
	.compute_tsc_offset = svm_compute_tsc_offset,
	.read_l1_tsc = svm_read_l1_tsc,

	.set_tdp_cr3 = set_tdp_cr3,

	.check_intercept = svm_check_intercept,
	.handle_external_intr = svm_handle_external_intr,

	.sched_in = svm_sched_in,
};

static int __init svm_init(void)
{
	return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
			__alignof__(struct vcpu_svm), THIS_MODULE);
}

static void __exit svm_exit(void)
{
	kvm_exit();
}

module_init(svm_init)
module_exit(svm_exit)