// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>

#include <asm/msr-index.h>
#include <asm/debugreg.h>

#include "kvm_emulate.h"
#include "trace.h"
#include "mmu.h"
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "svm.h"

#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK

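/*
 * Reflect a nested page fault into L1 as an #NPF vmexit.  Installed as the
 * guest_mmu's inject_page_fault callback when L2 runs with nested paging.
 */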
static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
				       struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
		/*
		 * TODO: track the cause of the nested page fault, and
		 * correctly fill in the high bits of exit_info_1.
		 */
		svm->vmcb->control.exit_code = SVM_EXIT_NPF;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = (1ULL << 32);
		svm->vmcb->control.exit_info_2 = fault->address;
	}

	svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
	svm->vmcb->control.exit_info_1 |= fault->error_code;

	nested_svm_vmexit(svm);
}

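/*
 * Deliver a #PF that was generated while L2 is active: reflect it to L1 as
 * an exception vmexit if L1 intercepts #PF and no nested VMRUN is pending,
 * otherwise inject the fault directly into L2.
 */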
static void svm_inject_page_fault_nested(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	WARN_ON(!is_guest_mode(vcpu));

	if (vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_EXCEPTION_OFFSET + PF_VECTOR) &&
	    !svm->nested.nested_run_pending) {
		svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + PF_VECTOR;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = fault->error_code;
		svm->vmcb->control.exit_info_2 = fault->address;
		nested_svm_vmexit(svm);
	} else {
		kvm_inject_page_fault(vcpu, fault);
	}
}

static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr3 = svm->nested.ctl.nested_cr3;
	u64 pdpte;
	int ret;

	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
				       offset_in_page(cr3) + index * 8, 8);
	if (ret)
		return 0;
	return pdpte;
}

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->nested.ctl.nested_cr3;
}

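/*
 * Point vcpu->arch.mmu at the shadow MMU that translates through L1's
 * nested page tables (nested_cr3) while L2 runs with NPT enabled.
 */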
static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
	kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, svm->vmcb01.ptr->save.cr4,
				svm->vmcb01.ptr->save.efer,
				svm->nested.ctl.nested_cr3);
	vcpu->arch.mmu->get_guest_pgd     = nested_svm_get_tdp_cr3;
	vcpu->arch.mmu->get_pdptr         = nested_svm_get_tdp_pdptr;
	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
	reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
	vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

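/*
 * Recompute the intercept vectors of the active VMCB while L2 runs: start
 * from L1's (vmcb01) intercepts, adjust the few that only concern L0, then
 * OR in everything L1 requested in vmcb12 (cached in svm->nested.ctl).
 */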
void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h, *g;
	unsigned int i;

	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->vmcb01.ptr->control;
	g = &svm->nested.ctl;

	for (i = 0; i < MAX_INTERCEPT; i++)
		c->intercepts[i] = h->intercepts[i];

	if (g->int_ctl & V_INTR_MASKING_MASK) {
		/* We only want the cr8 intercept bits of L1 */
		vmcb_clr_intercept(c, INTERCEPT_CR8_READ);
		vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);

		/*
		 * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
		 * affect any interrupt we may want to inject; therefore,
		 * interrupt window vmexits are irrelevant to L0.
		 */
		vmcb_clr_intercept(c, INTERCEPT_VINTR);
	}

	/* We don't want to see VMMCALLs from a nested guest */
	vmcb_clr_intercept(c, INTERCEPT_VMMCALL);

	for (i = 0; i < MAX_INTERCEPT; i++)
		c->intercepts[i] |= g->intercepts[i];
}

static void copy_vmcb_control_area(struct vmcb_control_area *dst,
				   struct vmcb_control_area *from)
{
	unsigned int i;

	for (i = 0; i < MAX_INTERCEPT; i++)
		dst->intercepts[i] = from->intercepts[i];

	dst->iopm_base_pa         = from->iopm_base_pa;
	dst->msrpm_base_pa        = from->msrpm_base_pa;
	dst->tsc_offset           = from->tsc_offset;
	/* asid not copied, it is handled manually for svm->vmcb.  */
	dst->tlb_ctl              = from->tlb_ctl;
	dst->int_ctl              = from->int_ctl;
	dst->int_vector           = from->int_vector;
	dst->int_state            = from->int_state;
	dst->exit_code            = from->exit_code;
	dst->exit_code_hi         = from->exit_code_hi;
	dst->exit_info_1          = from->exit_info_1;
	dst->exit_info_2          = from->exit_info_2;
	dst->exit_int_info        = from->exit_int_info;
	dst->exit_int_info_err    = from->exit_int_info_err;
	dst->nested_ctl           = from->nested_ctl;
	dst->event_inj            = from->event_inj;
	dst->event_inj_err        = from->event_inj_err;
	dst->nested_cr3           = from->nested_cr3;
	dst->virt_ext              = from->virt_ext;
	dst->pause_filter_count   = from->pause_filter_count;
	dst->pause_filter_thresh  = from->pause_filter_thresh;
}

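/*
 * A set bit in an MSR permission bitmap means "intercept".  Merging with a
 * bitwise OR therefore preserves every intercept that either L0 or L1 wants:
 * e.g. if L0 passes an MSR through but L1 intercepts it, the merged bitmap
 * still has the bit set and L2's access exits to L1.
 */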
static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
	/*
	 * This function merges the msr permission bitmaps of kvm and the
	 * nested vmcb. It is optimized in that it only merges the parts where
	 * the kvm msr permission bitmap may contain zero bits
	 */
	int i;

	if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
		return true;

	for (i = 0; i < MSRPM_OFFSETS; i++) {
		u32 value, p;
		u64 offset;

		if (msrpm_offsets[i] == 0xffffffff)
			break;

		p      = msrpm_offsets[i];
		offset = svm->nested.ctl.msrpm_base_pa + (p * 4);

		if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
			return false;

		svm->nested.msrpm[p] = svm->msrpm[p] | value;
	}

	svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));

	return true;
}

/*
 * Bits 11:0 of bitmap address are ignored by hardware
 */
static bool nested_svm_check_bitmap_pa(struct kvm_vcpu *vcpu, u64 pa, u32 size)
{
	u64 addr = PAGE_ALIGN(pa);

	return kvm_vcpu_is_legal_gpa(vcpu, addr) &&
	    kvm_vcpu_is_legal_gpa(vcpu, addr + size - 1);
}

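/*
 * Consistency checks on vmcb12's control area; a failure makes the emulated
 * VMRUN fail with SVM_EXIT_ERR (see nested_svm_vmrun).
 */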
static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
				       struct vmcb_control_area *control)
{
	if (CC(!vmcb_is_intercept(control, INTERCEPT_VMRUN)))
		return false;

	if (CC(control->asid == 0))
		return false;

	if (CC((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) && !npt_enabled))
		return false;

	if (CC(!nested_svm_check_bitmap_pa(vcpu, control->msrpm_base_pa,
					   MSRPM_SIZE)))
		return false;
	if (CC(!nested_svm_check_bitmap_pa(vcpu, control->iopm_base_pa,
					   IOPM_SIZE)))
		return false;

	return true;
}

static bool nested_vmcb_check_cr3_cr4(struct kvm_vcpu *vcpu,
				      struct vmcb_save_area *save)
{
	/*
	 * These checks are also performed by KVM_SET_SREGS,
	 * except that EFER.LMA is not checked by SVM against
	 * CR0.PG && EFER.LME.
	 */
	if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) {
		if (CC(!(save->cr4 & X86_CR4_PAE)) ||
		    CC(!(save->cr0 & X86_CR0_PE)) ||
		    CC(kvm_vcpu_is_illegal_gpa(vcpu, save->cr3)))
			return false;
	}

	if (CC(!kvm_is_valid_cr4(vcpu, save->cr4)))
		return false;

	return true;
}

/* Common checks that apply to both L1 and L2 state.  */
static bool nested_vmcb_valid_sregs(struct kvm_vcpu *vcpu,
				    struct vmcb_save_area *save)
{
	/*
	 * FIXME: these should be done after copying the fields,
	 * to avoid TOC/TOU races.  For these save area checks
	 * the possible damage is limited since kvm_set_cr0 and
	 * kvm_set_cr4 handle failure; EFER_SVME is an exception
	 * so it is force-set later in nested_vmcb02_prepare_save.
	 */
	if (CC(!(save->efer & EFER_SVME)))
		return false;

	if (CC((save->cr0 & X86_CR0_CD) == 0 && (save->cr0 & X86_CR0_NW)) ||
	    CC(save->cr0 & ~0xffffffffULL))
		return false;

	if (CC(!kvm_dr6_valid(save->dr6)) || CC(!kvm_dr7_valid(save->dr7)))
		return false;

	if (!nested_vmcb_check_cr3_cr4(vcpu, save))
		return false;

	if (CC(!kvm_valid_efer(vcpu, save->efer)))
		return false;

	return true;
}

static void nested_load_control_from_vmcb12(struct vcpu_svm *svm,
					    struct vmcb_control_area *control)
{
	copy_vmcb_control_area(&svm->nested.ctl, control);

	/* Copy it here because nested_svm_check_controls will check it.  */
	svm->nested.ctl.asid           = control->asid;
	svm->nested.ctl.msrpm_base_pa &= ~0x0fffULL;
	svm->nested.ctl.iopm_base_pa  &= ~0x0fffULL;
}

/*
 * Synchronize fields that are written by the processor, so that
 * they can be copied back into the vmcb12.
 */
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm)
{
	u32 mask;
	svm->nested.ctl.event_inj      = svm->vmcb->control.event_inj;
	svm->nested.ctl.event_inj_err  = svm->vmcb->control.event_inj_err;

	/* Only a few fields of int_ctl are written by the processor.  */
	mask = V_IRQ_MASK | V_TPR_MASK;
	if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) &&
	    svm_is_intercept(svm, INTERCEPT_VINTR)) {
		/*
		 * In order to request an interrupt window, L0 is usurping
		 * svm->vmcb->control.int_ctl and possibly setting V_IRQ
		 * even if it was clear in L1's VMCB.  Restoring it would be
		 * wrong.  However, in this case V_IRQ will remain true until
		 * interrupt_window_interception calls svm_clear_vintr and
		 * restores int_ctl.  We can just leave it aside.
		 */
		mask &= ~V_IRQ_MASK;
	}
	svm->nested.ctl.int_ctl        &= ~mask;
	svm->nested.ctl.int_ctl        |= svm->vmcb->control.int_ctl & mask;
}

/*
 * Transfer any event that L0 or L1 wanted to inject into L2 to
 * EXIT_INT_INFO.
 */
static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm,
						struct vmcb *vmcb12)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	u32 exit_int_info = 0;
	unsigned int nr;

	if (vcpu->arch.exception.injected) {
		nr = vcpu->arch.exception.nr;
		exit_int_info = nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;

		if (vcpu->arch.exception.has_error_code) {
			exit_int_info |= SVM_EVTINJ_VALID_ERR;
			vmcb12->control.exit_int_info_err =
				vcpu->arch.exception.error_code;
		}

	} else if (vcpu->arch.nmi_injected) {
		exit_int_info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;

	} else if (vcpu->arch.interrupt.injected) {
		nr = vcpu->arch.interrupt.nr;
		exit_int_info = nr | SVM_EVTINJ_VALID;

		if (vcpu->arch.interrupt.soft)
			exit_int_info |= SVM_EVTINJ_TYPE_SOFT;
		else
			exit_int_info |= SVM_EVTINJ_TYPE_INTR;
	}

	vmcb12->control.exit_int_info = exit_int_info;
}

static inline bool nested_npt_enabled(struct vcpu_svm *svm)
{
	return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
}

static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
{
	/*
	 * TODO: optimize unconditional TLB flush/MMU sync.  A partial list of
	 * things to fix before this can be conditional:
	 *
	 *  - Flush TLBs for both L1 and L2 remote TLB flush
	 *  - Honor L1's request to flush an ASID on nested VMRUN
	 *  - Sync nested NPT MMU on VMRUN that flushes L2's ASID[*]
	 *  - Don't crush a pending TLB flush in vmcb02 on nested VMRUN
	 *  - Flush L1's ASID on KVM_REQ_TLB_FLUSH_GUEST
	 *
	 * [*] Unlike nested EPT, SVM's ASID management can invalidate nested
	 *     NPT guest-physical mappings on VMRUN.
	 */
	kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
	kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
}

/*
 * Load guest's/host's cr3 on nested vmentry or vmexit. @nested_npt is true
 * if we are emulating VM-Entry into a guest with NPT enabled.
 */
static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
			       bool nested_npt, bool reload_pdptrs)
{
	if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3)))
		return -EINVAL;

	if (reload_pdptrs && !nested_npt && is_pae_paging(vcpu) &&
	    CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)))
		return -EINVAL;

	if (!nested_npt)
		kvm_mmu_new_pgd(vcpu, cr3);

	vcpu->arch.cr3 = cr3;
	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);

	kvm_init_mmu(vcpu);

	return 0;
}

void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm)
{
	if (!svm->nested.vmcb02.ptr)
		return;

	/* FIXME: merge g_pat from vmcb01 and vmcb12.  */
	svm->nested.vmcb02.ptr->save.g_pat = svm->vmcb01.ptr->save.g_pat;
}

static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
	bool new_vmcb12 = false;

	nested_vmcb02_compute_g_pat(svm);

	/* Load the nested guest state */
	if (svm->nested.vmcb12_gpa != svm->nested.last_vmcb12_gpa) {
		new_vmcb12 = true;
		svm->nested.last_vmcb12_gpa = svm->nested.vmcb12_gpa;
	}

	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_SEG))) {
		svm->vmcb->save.es = vmcb12->save.es;
		svm->vmcb->save.cs = vmcb12->save.cs;
		svm->vmcb->save.ss = vmcb12->save.ss;
		svm->vmcb->save.ds = vmcb12->save.ds;
		svm->vmcb->save.cpl = vmcb12->save.cpl;
		vmcb_mark_dirty(svm->vmcb, VMCB_SEG);
	}

	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DT))) {
		svm->vmcb->save.gdtr = vmcb12->save.gdtr;
		svm->vmcb->save.idtr = vmcb12->save.idtr;
		vmcb_mark_dirty(svm->vmcb, VMCB_DT);
	}

	kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);

	/*
	 * Force-set EFER_SVME even though it is checked earlier on the
	 * VMCB12, because the guest can flip the bit between the check
	 * and now.  Clearing EFER_SVME would call svm_free_nested.
	 */
	svm_set_efer(&svm->vcpu, vmcb12->save.efer | EFER_SVME);


	svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
	svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);

	svm->vcpu.arch.cr2 = vmcb12->save.cr2;

	kvm_rax_write(&svm->vcpu, vmcb12->save.rax);
	kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp);
	kvm_rip_write(&svm->vcpu, vmcb12->save.rip);

	/* In case we don't even reach vcpu_run, the fields are not updated */
	svm->vmcb->save.rax = vmcb12->save.rax;
	svm->vmcb->save.rsp = vmcb12->save.rsp;
	svm->vmcb->save.rip = vmcb12->save.rip;

	/* These bits will be set properly on the first execution when new_vmcb12 is true */
	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DR))) {
		svm->vmcb->save.dr7 = vmcb12->save.dr7 | DR7_FIXED_1;
		svm->vcpu.arch.dr6  = vmcb12->save.dr6 | DR6_ACTIVE_LOW;
		vmcb_mark_dirty(svm->vmcb, VMCB_DR);
	}
}

static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
{
	const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;
	struct kvm_vcpu *vcpu = &svm->vcpu;

	/*
	 * Filled at exit: exit_code, exit_code_hi, exit_info_1, exit_info_2,
	 * exit_int_info, exit_int_info_err, next_rip, insn_len, insn_bytes.
	 */

	/*
	 * Also covers avic_vapic_bar, avic_backing_page, avic_logical_id,
	 * avic_physical_id.
	 */
	WARN_ON(svm->vmcb01.ptr->control.int_ctl & AVIC_ENABLE_MASK);

	/* Copied from vmcb01.  msrpm_base can be overwritten later.  */
	svm->vmcb->control.nested_ctl = svm->vmcb01.ptr->control.nested_ctl;
	svm->vmcb->control.iopm_base_pa = svm->vmcb01.ptr->control.iopm_base_pa;
	svm->vmcb->control.msrpm_base_pa = svm->vmcb01.ptr->control.msrpm_base_pa;

	/* Done at vmrun: asid.  */

	/* Also overwritten later if necessary.  */
	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;

	/* nested_cr3.  */
	if (nested_npt_enabled(svm))
		nested_svm_init_mmu_context(vcpu);

	svm->vmcb->control.tsc_offset = vcpu->arch.tsc_offset =
		vcpu->arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;

	svm->vmcb->control.int_ctl             =
		(svm->nested.ctl.int_ctl & ~mask) |
		(svm->vmcb01.ptr->control.int_ctl & mask);

	svm->vmcb->control.virt_ext            = svm->nested.ctl.virt_ext;
	svm->vmcb->control.int_vector          = svm->nested.ctl.int_vector;
	svm->vmcb->control.int_state           = svm->nested.ctl.int_state;
	svm->vmcb->control.event_inj           = svm->nested.ctl.event_inj;
	svm->vmcb->control.event_inj_err       = svm->nested.ctl.event_inj_err;

	svm->vmcb->control.pause_filter_count  = svm->nested.ctl.pause_filter_count;
	svm->vmcb->control.pause_filter_thresh = svm->nested.ctl.pause_filter_thresh;

	nested_svm_transition_tlb_flush(vcpu);

	/* Enter Guest-Mode */
	enter_guest_mode(vcpu);

	/*
	 * Merge guest and host intercepts - must be called with vcpu in
	 * guest-mode to take effect.
	 */
	recalc_intercepts(svm);
}

static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	/*
	 * Some VMCB state is shared between L1 and L2 and thus has to be
	 * moved at the time of nested vmrun and vmexit.
	 *
	 * VMLOAD/VMSAVE state would also belong in this category, but KVM
	 * always performs VMLOAD and VMSAVE from the VMCB01.
	 */
	to_vmcb->save.spec_ctrl = from_vmcb->save.spec_ctrl;
}

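/*
 * Emulate the VMRUN world switch: switch to vmcb02, build its control and
 * save areas from vmcb12 (and vmcb01), then load L2's CR3.
 */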
int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
			 struct vmcb *vmcb12)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int ret;

	trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa,
			       vmcb12->save.rip,
			       vmcb12->control.int_ctl,
			       vmcb12->control.event_inj,
			       vmcb12->control.nested_ctl);

	trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
				    vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
				    vmcb12->control.intercepts[INTERCEPT_EXCEPTION],
				    vmcb12->control.intercepts[INTERCEPT_WORD3],
				    vmcb12->control.intercepts[INTERCEPT_WORD4],
				    vmcb12->control.intercepts[INTERCEPT_WORD5]);


	svm->nested.vmcb12_gpa = vmcb12_gpa;

	WARN_ON(svm->vmcb == svm->nested.vmcb02.ptr);

	nested_svm_copy_common_state(svm->vmcb01.ptr, svm->nested.vmcb02.ptr);

	svm_switch_vmcb(svm, &svm->nested.vmcb02);
	nested_vmcb02_prepare_control(svm);
	nested_vmcb02_prepare_save(svm, vmcb12);

	ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
				  nested_npt_enabled(svm), true);
	if (ret)
		return ret;

	if (!npt_enabled)
		vcpu->arch.mmu->inject_page_fault = svm_inject_page_fault_nested;

	svm_set_gif(svm, true);

	return 0;
}

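/*
 * Handle a VMRUN intercept from L1: map vmcb12 from guest memory, validate
 * it, stash L1 state in vmcb01 and enter guest mode.  Consistency-check
 * failures are reported to L1 as SVM_EXIT_ERR.
 */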
int nested_svm_vmrun(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int ret;
	struct vmcb *vmcb12;
	struct kvm_host_map map;
	u64 vmcb12_gpa;

	if (is_smm(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	vmcb12_gpa = svm->vmcb->save.rax;
	ret = kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map);
	if (ret == -EINVAL) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	} else if (ret) {
		return kvm_skip_emulated_instruction(vcpu);
	}

	ret = kvm_skip_emulated_instruction(vcpu);

	vmcb12 = map.hva;

	if (WARN_ON_ONCE(!svm->nested.initialized))
		return -EINVAL;

	nested_load_control_from_vmcb12(svm, &vmcb12->control);

	if (!nested_vmcb_valid_sregs(vcpu, &vmcb12->save) ||
	    !nested_vmcb_check_controls(vcpu, &svm->nested.ctl)) {
		vmcb12->control.exit_code    = SVM_EXIT_ERR;
		vmcb12->control.exit_code_hi = 0;
		vmcb12->control.exit_info_1  = 0;
		vmcb12->control.exit_info_2  = 0;
		goto out;
	}

	/* Clear internal status */
	kvm_clear_exception_queue(vcpu);
	kvm_clear_interrupt_queue(vcpu);

	/*
	 * Since vmcb01 is not in use, we can use it to store some of the L1
	 * state.
	 */
	svm->vmcb01.ptr->save.efer   = vcpu->arch.efer;
	svm->vmcb01.ptr->save.cr0    = kvm_read_cr0(vcpu);
	svm->vmcb01.ptr->save.cr4    = vcpu->arch.cr4;
	svm->vmcb01.ptr->save.rflags = kvm_get_rflags(vcpu);
	svm->vmcb01.ptr->save.rip    = kvm_rip_read(vcpu);

	if (!npt_enabled)
		svm->vmcb01.ptr->save.cr3 = kvm_read_cr3(vcpu);

	svm->nested.nested_run_pending = 1;

	if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12))
		goto out_exit_err;

	if (nested_svm_vmrun_msrpm(svm))
		goto out;

out_exit_err:
	svm->nested.nested_run_pending = 0;

	svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
	svm->vmcb->control.exit_code_hi = 0;
	svm->vmcb->control.exit_info_1  = 0;
	svm->vmcb->control.exit_info_2  = 0;

	nested_svm_vmexit(svm);

out:
	kvm_vcpu_unmap(vcpu, &map, true);

	return ret;
}

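/*
 * Copy the register set that VMLOAD/VMSAVE operates on (segment bases,
 * SYSCALL/SYSENTER MSRs, KERNEL_GS_BASE) from one VMCB to another.
 */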
void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	to_vmcb->save.fs = from_vmcb->save.fs;
	to_vmcb->save.gs = from_vmcb->save.gs;
	to_vmcb->save.tr = from_vmcb->save.tr;
	to_vmcb->save.ldtr = from_vmcb->save.ldtr;
	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
	to_vmcb->save.star = from_vmcb->save.star;
	to_vmcb->save.lstar = from_vmcb->save.lstar;
	to_vmcb->save.cstar = from_vmcb->save.cstar;
	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}

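/*
 * Emulate #VMEXIT: copy the L2 state that the CPU would save back into
 * vmcb12, switch to vmcb01 and restore the L1 state that was stashed there
 * on VMRUN.
 */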
int nested_svm_vmexit(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	struct vmcb *vmcb12;
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_host_map map;
	int rc;

	/* Triple faults in L2 should never escape. */
	WARN_ON_ONCE(kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu));

	rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
	if (rc) {
		if (rc == -EINVAL)
			kvm_inject_gp(vcpu, 0);
		return 1;
	}

	vmcb12 = map.hva;

	/* Exit Guest-Mode */
	leave_guest_mode(vcpu);
	svm->nested.vmcb12_gpa = 0;
	WARN_ON_ONCE(svm->nested.nested_run_pending);

	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);

	/* in case we halted in L2 */
	svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;


	/* Give the current vmcb to the guest */

	vmcb12->save.es     = vmcb->save.es;
	vmcb12->save.cs     = vmcb->save.cs;
	vmcb12->save.ss     = vmcb->save.ss;
	vmcb12->save.ds     = vmcb->save.ds;
	vmcb12->save.gdtr   = vmcb->save.gdtr;
	vmcb12->save.idtr   = vmcb->save.idtr;
	vmcb12->save.efer   = svm->vcpu.arch.efer;
	vmcb12->save.cr0    = kvm_read_cr0(vcpu);
	vmcb12->save.cr3    = kvm_read_cr3(vcpu);
	vmcb12->save.cr2    = vmcb->save.cr2;
	vmcb12->save.cr4    = svm->vcpu.arch.cr4;
	vmcb12->save.rflags = kvm_get_rflags(vcpu);
	vmcb12->save.rip    = kvm_rip_read(vcpu);
	vmcb12->save.rsp    = kvm_rsp_read(vcpu);
	vmcb12->save.rax    = kvm_rax_read(vcpu);
	vmcb12->save.dr7    = vmcb->save.dr7;
	vmcb12->save.dr6    = svm->vcpu.arch.dr6;
	vmcb12->save.cpl    = vmcb->save.cpl;

	vmcb12->control.int_state         = vmcb->control.int_state;
	vmcb12->control.exit_code         = vmcb->control.exit_code;
	vmcb12->control.exit_code_hi      = vmcb->control.exit_code_hi;
	vmcb12->control.exit_info_1       = vmcb->control.exit_info_1;
	vmcb12->control.exit_info_2       = vmcb->control.exit_info_2;

	if (vmcb12->control.exit_code != SVM_EXIT_ERR)
		nested_save_pending_event_to_vmcb12(svm, vmcb12);

	if (svm->nrips_enabled)
		vmcb12->control.next_rip  = vmcb->control.next_rip;

	vmcb12->control.int_ctl           = svm->nested.ctl.int_ctl;
	vmcb12->control.tlb_ctl           = svm->nested.ctl.tlb_ctl;
	vmcb12->control.event_inj         = svm->nested.ctl.event_inj;
	vmcb12->control.event_inj_err     = svm->nested.ctl.event_inj_err;

	vmcb12->control.pause_filter_count =
		svm->vmcb->control.pause_filter_count;
	vmcb12->control.pause_filter_thresh =
		svm->vmcb->control.pause_filter_thresh;

	nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr);

	svm_switch_vmcb(svm, &svm->vmcb01);

	/*
	 * On vmexit the GIF is set to false and
	 * no event can be injected in L1.
	 */
	svm_set_gif(svm, false);
	svm->vmcb->control.exit_int_info = 0;

	svm->vcpu.arch.tsc_offset = svm->vcpu.arch.l1_tsc_offset;
	if (svm->vmcb->control.tsc_offset != svm->vcpu.arch.tsc_offset) {
		svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset;
		vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
	}

	svm->nested.ctl.nested_cr3 = 0;

	/*
	 * Restore processor state that had been saved in vmcb01
	 */
	kvm_set_rflags(vcpu, svm->vmcb->save.rflags);
	svm_set_efer(vcpu, svm->vmcb->save.efer);
	svm_set_cr0(vcpu, svm->vmcb->save.cr0 | X86_CR0_PE);
	svm_set_cr4(vcpu, svm->vmcb->save.cr4);
	kvm_rax_write(vcpu, svm->vmcb->save.rax);
	kvm_rsp_write(vcpu, svm->vmcb->save.rsp);
	kvm_rip_write(vcpu, svm->vmcb->save.rip);

	svm->vcpu.arch.dr7 = DR7_FIXED_1;
	kvm_update_dr7(&svm->vcpu);

	trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code,
				       vmcb12->control.exit_info_1,
				       vmcb12->control.exit_info_2,
				       vmcb12->control.exit_int_info,
				       vmcb12->control.exit_int_info_err,
				       KVM_ISA_SVM);

	kvm_vcpu_unmap(vcpu, &map, true);

	nested_svm_transition_tlb_flush(vcpu);

	nested_svm_uninit_mmu_context(vcpu);

	rc = nested_svm_load_cr3(vcpu, svm->vmcb->save.cr3, false, true);
	if (rc)
		return 1;

	/*
	 * Drop what we picked up for L2 via svm_complete_interrupts() so it
	 * doesn't end up in L1.
	 */
	svm->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(vcpu);
	kvm_clear_interrupt_queue(vcpu);

	/*
	 * If we are here following the completion of a VMRUN that
	 * is being single-stepped, queue the pending #DB intercept
	 * right now so that it can be accounted for before we execute
	 * L1's next instruction.
	 */
	if (unlikely(svm->vmcb->save.rflags & X86_EFLAGS_TF))
		kvm_queue_exception(&(svm->vcpu), DB_VECTOR);

	return 0;
}

static void nested_svm_triple_fault(struct kvm_vcpu *vcpu)
{
	nested_svm_simple_vmexit(to_svm(vcpu), SVM_EXIT_SHUTDOWN);
}

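/*
 * Allocate the per-vCPU state needed to run a nested guest: the vmcb02 page
 * and the merged MSR permission bitmap.
 */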
int svm_allocate_nested(struct vcpu_svm *svm)
{
	struct page *vmcb02_page;

	if (svm->nested.initialized)
		return 0;

	vmcb02_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!vmcb02_page)
		return -ENOMEM;
	svm->nested.vmcb02.ptr = page_address(vmcb02_page);
	svm->nested.vmcb02.pa = __sme_set(page_to_pfn(vmcb02_page) << PAGE_SHIFT);

	svm->nested.msrpm = svm_vcpu_alloc_msrpm();
	if (!svm->nested.msrpm)
		goto err_free_vmcb02;
	svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm);

	svm->nested.initialized = true;
	return 0;

err_free_vmcb02:
	__free_page(vmcb02_page);
	return -ENOMEM;
}

void svm_free_nested(struct vcpu_svm *svm)
{
	if (!svm->nested.initialized)
		return;

	svm_vcpu_free_msrpm(svm->nested.msrpm);
	svm->nested.msrpm = NULL;

	__free_page(virt_to_page(svm->nested.vmcb02.ptr));
	svm->nested.vmcb02.ptr = NULL;

	/*
	 * When last_vmcb12_gpa matches the current vmcb12 gpa,
	 * some vmcb12 fields are not loaded if they are marked clean
	 * in the vmcb12, since in this case they are up to date already.
	 *
	 * When the vmcb02 is freed, this optimization becomes invalid.
	 */
	svm->nested.last_vmcb12_gpa = INVALID_GPA;

	svm->nested.initialized = false;
}

/*
 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
 */
void svm_leave_nested(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;

	if (is_guest_mode(vcpu)) {
		svm->nested.nested_run_pending = 0;
		svm->nested.vmcb12_gpa = INVALID_GPA;

		leave_guest_mode(vcpu);

		svm_switch_vmcb(svm, &svm->vmcb01);

		nested_svm_uninit_mmu_context(vcpu);
		vmcb_mark_all_dirty(svm->vmcb);
	}

	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
}

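/*
 * Decide whether an MSR intercept that hit in L0 should be forwarded to L1
 * by consulting L1's MSR permission bitmap.  Each 32-bit word of the bitmap
 * covers 16 MSRs with two bits each: e.g. for an MSR index ending in 0x3,
 * bit 6 is the read intercept and bit 7 the write intercept.
 */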
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
	u32 offset, msr, value;
	int write, mask;

	if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
		return NESTED_EXIT_HOST;

	msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	offset = svm_msrpm_offset(msr);
	write  = svm->vmcb->control.exit_info_1 & 1;
	mask   = 1 << ((2 * (msr & 0xf)) + write);

	if (offset == MSR_INVALID)
		return NESTED_EXIT_DONE;

	/* Offset is in 32 bit units but need in 8 bit units */
	offset *= 4;

	if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
		return NESTED_EXIT_DONE;

	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

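/*
 * Decide whether an IOIO intercept should be forwarded to L1 by testing the
 * port's bits in L1's I/O permission bitmap: one bit per port, so e.g. a
 * two-byte access to port 0x3f9 tests bits 1-2 of the byte at
 * iopm_base_pa + 0x7f.
 */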
static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
	unsigned port, size, iopm_len;
	u16 val, mask;
	u8 start_bit;
	u64 gpa;

	if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
		return NESTED_EXIT_HOST;

	port = svm->vmcb->control.exit_info_1 >> 16;
	size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
		SVM_IOIO_SIZE_SHIFT;
	gpa  = svm->nested.ctl.iopm_base_pa + (port / 8);
	start_bit = port % 8;
	iopm_len = (start_bit + size > 8) ? 2 : 1;
	mask = (0xf >> (4 - size)) << start_bit;
	val = 0;

	if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
		return NESTED_EXIT_DONE;

	return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

static int nested_svm_intercept(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;
	int vmexit = NESTED_EXIT_HOST;

	switch (exit_code) {
	case SVM_EXIT_MSR:
		vmexit = nested_svm_exit_handled_msr(svm);
		break;
	case SVM_EXIT_IOIO:
		vmexit = nested_svm_intercept_ioio(svm);
		break;
	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
		if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
		if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		/*
		 * Host-intercepted exceptions have been checked already in
		 * nested_svm_exit_special.  There is nothing to do here,
		 * the vmexit is injected by svm_check_nested_events.
		 */
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_ERR: {
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	default: {
		if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
			vmexit = NESTED_EXIT_DONE;
	}
	}

	return vmexit;
}

int nested_svm_exit_handled(struct vcpu_svm *svm)
{
	int vmexit;

	vmexit = nested_svm_intercept(svm);

	if (vmexit == NESTED_EXIT_DONE)
		nested_svm_vmexit(svm);

	return vmexit;
}

int nested_svm_check_permissions(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.efer & EFER_SVME) || !is_paging(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	if (to_svm(vcpu)->vmcb->save.cpl) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	return 0;
}

static bool nested_exit_on_exception(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;

	return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(nr));
}

static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;

	svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
	svm->vmcb->control.exit_code_hi = 0;

	if (svm->vcpu.arch.exception.has_error_code)
		svm->vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code;

	/*
	 * EXITINFO2 is undefined for all exception intercepts other
	 * than #PF.
	 */
	if (nr == PF_VECTOR) {
		if (svm->vcpu.arch.exception.nested_apf)
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
		else if (svm->vcpu.arch.exception.has_payload)
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
		else
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
	} else if (nr == DB_VECTOR) {
		/* See inject_pending_event.  */
		kvm_deliver_exception_payload(&svm->vcpu);
		if (svm->vcpu.arch.dr7 & DR7_GD) {
			svm->vcpu.arch.dr7 &= ~DR7_GD;
			kvm_update_dr7(&svm->vcpu);
		}
	} else
		WARN_ON(svm->vcpu.arch.exception.has_payload);

	nested_svm_vmexit(svm);
}

static inline bool nested_exit_on_init(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
}

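/*
 * Decide whether a pending event (INIT, exception, SMI, NMI or interrupt)
 * must first be turned into a vmexit to L1 before it can be injected.
 */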
static int svm_check_nested_events(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	bool block_nested_events =
		kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (lapic_in_kernel(vcpu) &&
	    test_bit(KVM_APIC_INIT, &apic->pending_events)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_init(svm))
			return 0;
		nested_svm_simple_vmexit(svm, SVM_EXIT_INIT);
		return 0;
	}

	if (vcpu->arch.exception.pending) {
		/*
		 * Only a pending nested run can block a pending exception.
		 * Otherwise an injected NMI/interrupt should either be
		 * lost or delivered to the nested hypervisor in the EXITINTINFO
		 * vmcb field, while delivering the pending exception.
		 */
		if (svm->nested.nested_run_pending)
			return -EBUSY;
		if (!nested_exit_on_exception(svm))
			return 0;
		nested_svm_inject_exception_vmexit(svm);
		return 0;
	}

	if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_smi(svm))
			return 0;
		nested_svm_simple_vmexit(svm, SVM_EXIT_SMI);
		return 0;
	}

	if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_nmi(svm))
			return 0;
		nested_svm_simple_vmexit(svm, SVM_EXIT_NMI);
		return 0;
	}

	if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_intr(svm))
			return 0;
		trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
		nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
		return 0;
	}

	return 0;
}

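/*
 * Exits that L0 wants to inspect before (or instead of) letting L1 handle
 * them, regardless of what L1 intercepts.
 */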
int nested_svm_exit_special(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;

	switch (exit_code) {
	case SVM_EXIT_INTR:
	case SVM_EXIT_NMI:
	case SVM_EXIT_NPF:
		return NESTED_EXIT_HOST;
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);

		if (svm->vmcb01.ptr->control.intercepts[INTERCEPT_EXCEPTION] &
		    excp_bits)
			return NESTED_EXIT_HOST;
		else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
			 svm->vcpu.arch.apf.host_apf_flags)
			/* Trap async PF even if not shadowing */
			return NESTED_EXIT_HOST;
		break;
	}
	default:
		break;
	}

	return NESTED_EXIT_CONTINUE;
}
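/*
 * KVM_GET_NESTED_STATE: fill in the header flags, then export the cached
 * vmcb12 control area and the L1 save area kept in vmcb01.
 */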
static int svm_get_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				u32 user_data_size)
{
	struct vcpu_svm *svm;
	struct kvm_nested_state kvm_state = {
		.flags = 0,
		.format = KVM_STATE_NESTED_FORMAT_SVM,
		.size = sizeof(kvm_state),
	};
	struct vmcb __user *user_vmcb = (struct vmcb __user *)
		&user_kvm_nested_state->data.svm[0];

	if (!vcpu)
		return kvm_state.size + KVM_STATE_NESTED_SVM_VMCB_SIZE;

	svm = to_svm(vcpu);

	if (user_data_size < kvm_state.size)
		goto out;

	/* First fill in the header and copy it out.  */
	if (is_guest_mode(vcpu)) {
		kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa;
		kvm_state.size += KVM_STATE_NESTED_SVM_VMCB_SIZE;
		kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;

		if (svm->nested.nested_run_pending)
			kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
	}

	if (gif_set(svm))
		kvm_state.flags |= KVM_STATE_NESTED_GIF_SET;

	if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
		return -EFAULT;

	if (!is_guest_mode(vcpu))
		goto out;

	/*
	 * Copy over the full size of the VMCB rather than just the size
	 * of the structs.
	 */
	if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
		return -EFAULT;
	if (copy_to_user(&user_vmcb->control, &svm->nested.ctl,
			 sizeof(user_vmcb->control)))
		return -EFAULT;
	if (copy_to_user(&user_vmcb->save, &svm->vmcb01.ptr->save,
			 sizeof(user_vmcb->save)))
		return -EFAULT;
out:
	return kvm_state.size;
}

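/*
 * KVM_SET_NESTED_STATE: validate the header and the userspace-provided
 * control/save areas, restore L1 state into vmcb01 and re-enter guest mode
 * on vmcb02.  The nested MSR permission bitmap is merged later, from
 * svm_get_nested_state_pages().
 */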
static int svm_set_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				struct kvm_nested_state *kvm_state)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb __user *user_vmcb = (struct vmcb __user *)
		&user_kvm_nested_state->data.svm[0];
	struct vmcb_control_area *ctl;
	struct vmcb_save_area *save;
	unsigned long cr0;
	int ret;

	BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
		     KVM_STATE_NESTED_SVM_VMCB_SIZE);

	if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
		return -EINVAL;

	if (kvm_state->flags & ~(KVM_STATE_NESTED_GUEST_MODE |
				 KVM_STATE_NESTED_RUN_PENDING |
				 KVM_STATE_NESTED_GIF_SET))
		return -EINVAL;

	/*
	 * If in guest mode, vcpu->arch.efer actually refers to the L2 guest's
	 * EFER.SVME, but EFER.SVME still has to be 1 for VMRUN to succeed.
	 */
	if (!(vcpu->arch.efer & EFER_SVME)) {
		/* GIF=1 and no guest mode are required if SVME=0.  */
		if (kvm_state->flags != KVM_STATE_NESTED_GIF_SET)
			return -EINVAL;
	}

	/* SMM temporarily disables SVM, so we cannot be in guest mode.  */
	if (is_smm(vcpu) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
		return -EINVAL;

	if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
		svm_leave_nested(svm);
		svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
		return 0;
	}

	if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
		return -EINVAL;
	if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
		return -EINVAL;

	ret  = -ENOMEM;
	ctl  = kzalloc(sizeof(*ctl),  GFP_KERNEL_ACCOUNT);
	save = kzalloc(sizeof(*save), GFP_KERNEL_ACCOUNT);
	if (!ctl || !save)
		goto out_free;

	ret = -EFAULT;
	if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
		goto out_free;
	if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
		goto out_free;

	ret = -EINVAL;
	if (!nested_vmcb_check_controls(vcpu, ctl))
		goto out_free;

	/*
	 * Processor state contains L2 state.  Check that it is
	 * valid for guest mode (see nested_vmcb_valid_sregs).
	 */
	cr0 = kvm_read_cr0(vcpu);
	if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
		goto out_free;

	/*
	 * Validate host state saved from before VMRUN (see
	 * nested_svm_check_permissions).
	 */
	if (!(save->cr0 & X86_CR0_PG) ||
	    !(save->cr0 & X86_CR0_PE) ||
	    (save->rflags & X86_EFLAGS_VM) ||
	    !nested_vmcb_valid_sregs(vcpu, save))
		goto out_free;

	/*
	 * While the nested guest CR3 is already checked and set by
	 * KVM_SET_SREGS, it was set when nested state was yet loaded,
	 * thus MMU might not be initialized correctly.
	 * Set it again to fix this.
	 */

	ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
				  nested_npt_enabled(svm), false);
	if (WARN_ON_ONCE(ret))
		goto out_free;


	/*
	 * All checks done, we can enter guest mode. Userspace provides
	 * vmcb12.control, which will be combined with L1 and stored into
	 * vmcb02, and the L1 save state which we store in vmcb01.
	 * L2 registers if needed are moved from the current VMCB to VMCB02.
	 */

	if (is_guest_mode(vcpu))
		svm_leave_nested(svm);
	else
		svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save;

	svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));

	svm->nested.nested_run_pending =
		!!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);

	svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;

	svm->vmcb01.ptr->save.es = save->es;
	svm->vmcb01.ptr->save.cs = save->cs;
	svm->vmcb01.ptr->save.ss = save->ss;
	svm->vmcb01.ptr->save.ds = save->ds;
	svm->vmcb01.ptr->save.gdtr = save->gdtr;
	svm->vmcb01.ptr->save.idtr = save->idtr;
	svm->vmcb01.ptr->save.rflags = save->rflags | X86_EFLAGS_FIXED;
	svm->vmcb01.ptr->save.efer = save->efer;
	svm->vmcb01.ptr->save.cr0 = save->cr0;
	svm->vmcb01.ptr->save.cr3 = save->cr3;
	svm->vmcb01.ptr->save.cr4 = save->cr4;
	svm->vmcb01.ptr->save.rax = save->rax;
	svm->vmcb01.ptr->save.rsp = save->rsp;
	svm->vmcb01.ptr->save.rip = save->rip;
	svm->vmcb01.ptr->save.cpl = 0;


	nested_load_control_from_vmcb12(svm, ctl);

	svm_switch_vmcb(svm, &svm->nested.vmcb02);

	nested_vmcb02_prepare_control(svm);

	kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
	ret = 0;
out_free:
	kfree(save);
	kfree(ctl);

	return ret;
}

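/*
 * Runs when KVM_REQ_GET_NESTED_STATE_PAGES is processed (e.g. after setting
 * nested state): reload the PDPTRs if needed and merge the nested MSR
 * permission bitmap, both of which require reading guest memory.
 */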
static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (WARN_ON(!is_guest_mode(vcpu)))
		return true;

	if (!vcpu->arch.pdptrs_from_userspace &&
	    !nested_npt_enabled(svm) && is_pae_paging(vcpu))
		/*
		 * Reload the guest's PDPTRs since after a migration
		 * the guest CR3 might be restored prior to setting the nested
		 * state which can lead to a load of wrong PDPTRs.
		 */
		if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3)))
			return false;

	if (!nested_svm_vmrun_msrpm(svm)) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror =
			KVM_INTERNAL_ERROR_EMULATION;
		vcpu->run->internal.ndata = 0;
		return false;
	}

	return true;
}

struct kvm_x86_nested_ops svm_nested_ops = {
	.check_events = svm_check_nested_events,
	.triple_fault = nested_svm_triple_fault,
	.get_nested_state_pages = svm_get_nested_state_pages,
	.get_state = svm_get_nested_state,
	.set_state = svm_set_nested_state,
};