// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>

#include <asm/svm.h>

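/* Physical address of a page, with the SME encryption bit (C-bit) set. */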
#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)

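/*
 * Sizes per the AMD APM: the I/O permission map is three pages (one
 * intercept bit per port, plus slack for accesses that straddle the last
 * port), the MSR permission map two pages (two bits per MSR across three
 * MSR ranges).
 */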
#define	IOPM_SIZE (PAGE_SIZE * 3)
#define	MSRPM_SIZE (PAGE_SIZE * 2)

#define MAX_DIRECT_ACCESS_MSRS	20
#define MSRPM_OFFSETS	16
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;

enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,        /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,         /* DR6, DR7 */
	VMCB_DT,         /* GDT, IDT */
	VMCB_SEG,        /* CS, DS, SS, ES, CPL */
	VMCB_CR2,        /* CR2 only */
	VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,       /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
			  * AVIC PHYSICAL_TABLE pointer,
			  * AVIC LOGICAL_TABLE pointer
			  */
	VMCB_DIRTY_MAX,
};

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))
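
/*
 * Illustrative sketch of the clean-bit protocol implemented by the
 * vmcb_mark_*() helpers below: after software changes VMCB state, the
 * matching clean bit must be cleared so that hardware reloads that state
 * on the next VMRUN, e.g.
 *
 *	vmcb->control.asid = svm->asid;
 *	vmcb_mark_dirty(vmcb, VMCB_ASID);
 */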

struct kvm_sev_info {
	bool active;		/* SEV enabled guest */
	bool es_active;		/* SEV-ES enabled guest */
	unsigned int asid;	/* ASID used for this guest */
	unsigned int handle;	/* SEV firmware handle */
	int fd;			/* SEV device fd */
	unsigned long pages_locked; /* Number of pages locked */
	struct list_head regions_list;  /* List of registered regions */
	u64 ap_jump_table;	/* SEV-ES AP Jump Table address */
	struct kvm *enc_context_owner; /* Owner of copied encryption context */
	struct misc_cg *misc_cg; /* For misc cgroup accounting */
};

struct kvm_svm {
	struct kvm kvm;

	/* Struct members for AVIC */
	u32 avic_vm_id;
	struct page *avic_logical_id_table_page;
	struct page *avic_physical_id_table_page;
	struct hlist_node hnode;

	struct kvm_sev_info sev_info;
};

struct kvm_vcpu;

struct kvm_vmcb_info {
	struct vmcb *ptr;
	unsigned long pa;
	int cpu;
	uint64_t asid_generation;
};

struct svm_nested_state {
	struct kvm_vmcb_info vmcb02;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb12_gpa;
	u64 last_vmcb12_gpa;

	/* These are the merged vectors */
	u32 *msrpm;

	/*
	 * A VMRUN has started but has not yet been performed, so
	 * we cannot inject a nested vmexit yet.
	 */
	bool nested_run_pending;

	/* cache for control fields of the guest */
	struct vmcb_control_area ctl;

	bool initialized;
};

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	/* vmcb always points at current_vmcb->ptr, it's purely a shorthand. */
	struct vmcb *vmcb;
	struct kvm_vmcb_info vmcb01;
	struct kvm_vmcb_info *current_vmcb;
	struct svm_cpu_data *svm_data;
	u32 asid;
	u32 sysenter_esp_hi;
	u32 sysenter_eip_hi;
	uint64_t tsc_aux;

	u64 msr_decfg;

	u64 next_rip;

	u64 spec_ctrl;
	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate L2_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct svm_nested_state nested;

	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;

	unsigned int3_injected;
	unsigned long int3_rip;

	/* cached guest cpuid flags for faster access */
	bool nrips_enabled	: 1;

	u32 ldr_reg;
	u32 dfr_reg;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;
	bool avic_is_running;

	/*
	 * Per-vCPU list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information that
	 * is consulted when vCPU affinity is updated, avoiding the need to
	 * scan for the IRTE and match ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;

	/* Save desired MSR intercept (read: pass-through) state */
	struct {
		DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
		DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
	} shadow_msr_intercept;

	/* SEV-ES support */
	struct vmcb_save_area *vmsa;
	struct ghcb *ghcb;
	struct kvm_host_map ghcb_map;
	bool received_first_sipi;

	/* SEV-ES scratch area support */
	void *ghcb_sa;
	u64 ghcb_sa_len;
	bool ghcb_sa_sync;
	bool ghcb_sa_free;

	bool guest_state_loaded;
};

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
	struct vmcb *current_vmcb;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};

DECLARE_PER_CPU(struct svm_cpu_data *, svm_data);

void recalc_intercepts(struct vcpu_svm *svm);

static inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_svm, kvm);
}

static inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->active;
#else
	return false;
#endif
}

static inline bool sev_es_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev_guest(kvm) && sev->es_active;
#else
	return false;
#endif
}

static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
{
	return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
}

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}

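/*
 * The wrappers below flag intercepts on vmcb01; recalc_intercepts() then
 * merges them into the VMCB that will actually be run, e.g. (illustrative
 * call only):
 *
 *	svm_set_intercept(svm, INTERCEPT_VINTR);
 */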
static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	if (!sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
	}

	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb->control.intercepts[INTERCEPT_DR] = 0;

	/* DR7 access must remain intercepted for an SEV-ES guest */
	if (sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
	}

	recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_set_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_clr_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
	return vmcb_is_intercept(&svm->vmcb->control, bit);
}

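/*
 * GIF (Global Interrupt Flag) handling: with hardware vGIF, the guest's
 * GIF lives in the VMCB int_ctl field; without it, KVM emulates GIF via
 * HF_GIF_MASK in vcpu->arch.hflags. The helpers below pick the right home.
 */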
static inline bool vgif_enabled(struct vcpu_svm *svm)
{
	return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		return !!(svm->vmcb->control.int_ctl & V_GIF_MASK);
	else
		return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

/* svm.c */
#define MSR_INVALID				0xffffffffU

extern bool dump_invalid_vmcb;

u32 svm_msrpm_offset(u32 msr);
u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void svm_flush_tlb(struct kvm_vcpu *vcpu);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
			  int read, int write);

/* nested.c */

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}

static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}

int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb_gpa, struct vmcb *vmcb12);
void svm_leave_nested(struct vcpu_svm *svm);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);

static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
{
	svm->vmcb->control.exit_code   = exit_code;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;
	return nested_svm_vmexit(svm);
}

int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);

extern struct kvm_x86_nested_ops svm_nested_ops;

/* avic.c */

#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK	(0xFF)
#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT			31
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK		(1 << 31)

#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK	(0xFFULL)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK	(0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK		(1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK		(1ULL << 63)

#define VMCB_AVIC_APIC_BAR_MASK		0xFFFFFFFFFF000ULL

extern int avic;

static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
{
	svm->vmcb->control.avic_vapic_bar = data & VMCB_AVIC_APIC_BAR_MASK;
	vmcb_mark_dirty(svm->vmcb, VMCB_AVIC);
}

static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 *entry = svm->avic_physical_id_cache;

	if (!entry)
		return false;

	return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
}

int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm);
void svm_toggle_avic_for_irq_window(struct kvm_vcpu *vcpu, bool activate);
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_post_state_restore(struct kvm_vcpu *vcpu);
void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
bool svm_check_apicv_inhibit_reasons(ulong bit);
void svm_pre_update_apicv_exec_ctrl(struct kvm *kvm, bool activate);
void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec);
bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
		       uint32_t guest_irq, bool set);
void svm_vcpu_blocking(struct kvm_vcpu *vcpu);
void svm_vcpu_unblocking(struct kvm_vcpu *vcpu);

/* sev.c */

#define GHCB_VERSION_MAX		1ULL
#define GHCB_VERSION_MIN		1ULL

#define GHCB_MSR_INFO_POS		0
#define GHCB_MSR_INFO_MASK		(BIT_ULL(12) - 1)
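/* The low 12 bits of the GHCB MSR hold the request/response code. */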

#define GHCB_MSR_SEV_INFO_RESP		0x001
#define GHCB_MSR_SEV_INFO_REQ		0x002
#define GHCB_MSR_VER_MAX_POS		48
#define GHCB_MSR_VER_MAX_MASK		0xffff
#define GHCB_MSR_VER_MIN_POS		32
#define GHCB_MSR_VER_MIN_MASK		0xffff
#define GHCB_MSR_CBIT_POS		24
#define GHCB_MSR_CBIT_MASK		0xff
#define GHCB_MSR_SEV_INFO(_max, _min, _cbit)				\
	((((_max) & GHCB_MSR_VER_MAX_MASK) << GHCB_MSR_VER_MAX_POS) |	\
	 (((_min) & GHCB_MSR_VER_MIN_MASK) << GHCB_MSR_VER_MIN_POS) |	\
	 (((_cbit) & GHCB_MSR_CBIT_MASK) << GHCB_MSR_CBIT_POS) |	\
	 GHCB_MSR_SEV_INFO_RESP)
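
/*
 * Illustrative expansion (a C-bit at position 51 is a made-up example):
 *
 *	GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX, GHCB_VERSION_MIN, 51) ==
 *		(1ULL << 48) | (1ULL << 32) | (51ULL << 24) | 0x001
 */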

#define GHCB_MSR_CPUID_REQ		0x004
#define GHCB_MSR_CPUID_RESP		0x005
#define GHCB_MSR_CPUID_FUNC_POS		32
#define GHCB_MSR_CPUID_FUNC_MASK	0xffffffff
#define GHCB_MSR_CPUID_VALUE_POS	32
#define GHCB_MSR_CPUID_VALUE_MASK	0xffffffff
#define GHCB_MSR_CPUID_REG_POS		30
#define GHCB_MSR_CPUID_REG_MASK		0x3
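
/*
 * Illustrative request for CPUID function 0x8000001f, register EAX
 * (register index 0); the values are examples only:
 *
 *	msr = GHCB_MSR_CPUID_REQ |
 *	      (0ULL << GHCB_MSR_CPUID_REG_POS) |
 *	      (0x8000001fULL << GHCB_MSR_CPUID_FUNC_POS);
 */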

#define GHCB_MSR_TERM_REQ		0x100
#define GHCB_MSR_TERM_REASON_SET_POS	12
#define GHCB_MSR_TERM_REASON_SET_MASK	0xf
#define GHCB_MSR_TERM_REASON_POS	16
#define GHCB_MSR_TERM_REASON_MASK	0xff
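
/*
 * Illustrative composition of a termination request; reason_set and
 * reason_code are placeholder variables:
 *
 *	msr = GHCB_MSR_TERM_REQ |
 *	      (reason_set << GHCB_MSR_TERM_REASON_SET_POS) |
 *	      (reason_code << GHCB_MSR_TERM_REASON_POS);
 */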

extern unsigned int max_sev_asid;

void sev_vm_destroy(struct kvm *kvm);
int svm_mem_enc_op(struct kvm *kvm, void __user *argp);
int svm_register_enc_region(struct kvm *kvm,
			    struct kvm_enc_region *range);
int svm_unregister_enc_region(struct kvm *kvm,
			      struct kvm_enc_region *range);
int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd);
void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
void sev_hardware_teardown(void);
int sev_cpu_init(struct svm_cpu_data *sd);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_init_vmcb(struct vcpu_svm *svm);
void sev_es_create_vcpu(struct vcpu_svm *svm);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu);

/* vmenter.S */

void __svm_sev_es_vcpu_run(unsigned long vmcb_pa);
void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);

#endif