#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
	bool soft)
{
	vcpu->arch.interrupt.pending = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.pending = false;
}

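/* True if a pending exception, interrupt or NMI still has to be injected. */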
static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.pending || vcpu->arch.interrupt.pending ||
		vcpu->arch.nmi_injected;
}

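/* #BP and #OF are software exceptions, raised by the INT3 and INTO instructions. */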
static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.efer & EFER_LMA;
#else
	return 0;
#endif
}

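/* 64-bit mode is long mode (EFER.LMA) with CS.L set. */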
static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
	int cs_db, cs_l;

	if (!is_long_mode(vcpu))
		return false;
	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
	return cs_l;
}

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}

static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}

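/*
 * Cache the gva/gfn of the last guest MMIO access, tagged with the current
 * memslot generation, so a repeated access to the same page can be
 * recognized without walking the page tables again.
 */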
static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	vcpu->arch.mmio_gva = gva & PAGE_MASK;
	vcpu->arch.access = access;
	vcpu->arch.mmio_gfn = gfn;
	vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

/*
 * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
	      vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
	      vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}

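/* Read a register; outside 64-bit mode the value is truncated to 32 bits. */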
static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu,
					       enum kvm_reg reg)
{
	unsigned long val = kvm_register_read(vcpu, reg);

	return is_64_bit_mode(vcpu) ? val : (u32)val;
}

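/* Write a register; outside 64-bit mode the value is truncated to 32 bits first. */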
static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
				       enum kvm_reg reg,
				       unsigned long val)
{
	if (!is_64_bit_mode(vcpu))
		val = (u32)val;
	return kvm_register_write(vcpu, reg, val);
}

void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);

int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);

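/* XSAVE state components KVM exposes to guests: x87, SSE, AVX and MPX. */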
#define KVM_SUPPORTED_XCR0     (XSTATE_FP | XSTATE_SSE | XSTATE_YMM \
				| XSTATE_BNDREGS | XSTATE_BNDCSR)
extern u64 host_xcr0;

extern u64 kvm_supported_xcr0(void);

extern unsigned int min_timer_period_us;

extern struct static_key kvm_no_apic_vcpu;
#endif