/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <asm/processor.h>
#include <asm/mwait.h>
#include <linux/kvm_host.h>
#include <asm/pvclock.h>
#include "kvm_cache_regs.h"

#define MSR_IA32_CR_PAT_DEFAULT  0x0007040600070406ULL

13 14
static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
15
	vcpu->arch.exception.injected = false;
16 17
}

18 19
static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
	bool soft)
A
Avi Kivity 已提交
20 21
{
	vcpu->arch.interrupt.pending = true;
22
	vcpu->arch.interrupt.soft = soft;
A
Avi Kivity 已提交
23 24 25 26 27 28 29 30
	vcpu->arch.interrupt.nr = vector;
}

/* Drop any interrupt that is queued for injection into the guest. */
static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.pending = false;
}

31 32
static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
33
	return vcpu->arch.exception.injected || vcpu->arch.interrupt.pending ||
34 35
		vcpu->arch.nmi_injected;
}
36 37 38 39 40

/* #BP and #OF are the only exception vectors raised by software instructions. */
static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

42 43 44 45 46
static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

/* Nonzero if the guest has long mode active (EFER.LMA). */
static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.efer & EFER_LMA;
#else
	/* Long mode cannot be active on a 32-bit host build. */
	return 0;
#endif
}

56 57 58 59 60 61 62 63 64 65
static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
	int cs_db, cs_l;

	if (!is_long_mode(vcpu))
		return false;
	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
	return cs_l;
}

66 67 68 69 70 71 72 73 74 75
static inline bool is_la57_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return (vcpu->arch.efer & EFER_LMA) &&
		 kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
#else
	return 0;
#endif
}

76 77 78 79 80
static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

81 82 83 84 85 86 87 88 89 90 91 92
static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

/* Nonzero if the guest has page size extensions enabled (CR4.PSE). */
static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
93
	return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
94 95
}

96 97 98 99 100
static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}

101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134
static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
}

/* Virtual address width (57 or 48 bits) as seen by the instruction emulator. */
static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
{
	return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48;
}

/*
 * Canonicalize @la: sign-extend bit (vaddr_bits - 1) through bit 63 by
 * shifting the value up to the top of the register and arithmetically
 * shifting it back down.
 */
static inline u64 get_canonical(u64 la, u8 vaddr_bits)
{
	return ((int64_t)la << (64 - vaddr_bits)) >> (64 - vaddr_bits);
}

/* True if @la is not in canonical form for the vCPU's virtual address width. */
static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return get_canonical(la, vcpu_virt_addr_bits(vcpu)) != la;
#else
	/* Every 32-bit address is canonical. */
	return false;
#endif
}

/* Emulator-context variant: non-canonical check using the emulator's CR4 view. */
static inline bool emul_is_noncanonical_address(u64 la,
						struct x86_emulate_ctxt *ctxt)
{
#ifdef CONFIG_X86_64
	return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la;
#else
	/* Every 32-bit address is canonical. */
	return false;
#endif
}

135 136 137
static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
138 139 140 141 142
	/*
	 * If this is a shadow nested page table, the "GVA" is
	 * actually a nGPA.
	 */
	vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
143 144
	vcpu->arch.access = access;
	vcpu->arch.mmio_gfn = gfn;
145 146 147 148 149 150
	vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
}

/* True if the cached MMIO info is still valid for the current memslots. */
static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

/*
154 155
 * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
156
 */
157 158
#define MMIO_GVA_ANY (~(gva_t)0)

159 160
static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
161
	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
162 163 164 165 166 167 168
		return;

	vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
169 170
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
	      vcpu->arch.mmio_gva == (gva & PAGE_MASK))
171 172 173 174 175 176 177
		return true;

	return false;
}

/* True if @gpa hits the currently valid cached MMIO frame. */
static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
	       vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT;
}

185 186 187 188 189 190 191 192
static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu,
					       enum kvm_reg reg)
{
	unsigned long val = kvm_register_read(vcpu, reg);

	return is_64_bit_mode(vcpu) ? val : (u32)val;
}

193 194 195 196 197 198 199 200 201
static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
				       enum kvm_reg reg,
				       unsigned long val)
{
	if (!is_64_bit_mode(vcpu))
		val = (u32)val;
	return kvm_register_write(vcpu, reg, val);
}

202 203 204 205 206
static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
	return !(kvm->arch.disabled_quirks & quirk);
}

207 208
void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
209
void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
210
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
211

212
void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
213
u64 get_kvmclock_ns(struct kvm *kvm);
214

215 216 217 218
int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

N
Nadav Har'El 已提交
219 220 221 222
int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

X
Xiao Guangrong 已提交
223
void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
224
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
225
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
226 227
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
228 229
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num);
230
bool kvm_vector_hashing_enabled(void);
231

D
Dave Hansen 已提交
232 233
#define KVM_SUPPORTED_XCR0     (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
				| XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
234 235
				| XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \
				| XFEATURE_MASK_PKRU)
A
Avi Kivity 已提交
236 237
extern u64 host_xcr0;

238 239
extern u64 kvm_supported_xcr0(void);

240 241
extern unsigned int min_timer_period_us;

242 243
extern unsigned int lapic_timer_advance_ns;

244
extern struct static_key kvm_no_apic_vcpu;
245

246 247 248 249 250 251
static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
				   vcpu->arch.virtual_tsc_shift);
}

/* Same "calling convention" as do_div:
 * - divide (n << 32) by base
 * - put result in n
 * - return remainder
 *
 * divl divides EDX:EAX by its operand; EAX is seeded with 0 and EDX
 * with the low 32 bits of n, so the dividend is exactly (n << 32).
 * NOTE(review): a quotient that does not fit in 32 bits, or base == 0,
 * raises #DE — callers are presumed to guarantee this cannot happen.
 */
#define do_shl32_div32(n, base)					\
	({							\
	    u32 __quot, __rem;					\
	    asm("divl %2" : "=a" (__quot), "=d" (__rem)		\
			: "rm" (base), "0" (0), "1" ((u32) n));	\
	    n = __quot;						\
	    __rem;						\
	 })

266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283
static inline bool kvm_mwait_in_guest(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_MWAIT))
		return false;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		/* All AMD CPUs have a working MWAIT implementation */
		return true;
	case X86_VENDOR_INTEL:
		/* Handle Intel below */
		break;
	default:
		return false;
	}

284 285 286
	if (boot_cpu_has_bug(X86_BUG_MONITOR))
		return false;

287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302
	/*
	 * Intel CPUs without CPUID5_ECX_INTERRUPT_BREAK are problematic as
	 * they would allow guest to stop the CPU completely by disabling
	 * interrupts then invoking MWAIT.
	 */
	if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
		return false;

	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);

	if (!(ecx & CPUID5_ECX_INTERRUPT_BREAK))
		return false;

	return true;
}

#endif