Commit a2fa3e9f authored by Gregory Haskins, committed by Avi Kivity

KVM: Remove arch specific components from the general code

struct kvm_vcpu has vmx-specific members; move them out into a private structure.
Signed-off-by: Gregory Haskins <ghaskins@novell.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Parent c820c2aa
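A minimal, self-contained sketch of the private-data pattern this patch introduces may help orient the diff below: the generic struct kvm_vcpu keeps only an opaque _priv pointer, and the VMX backend recovers its own state through a to_vmx() accessor. The structs are trimmed to the fields relevant here, and calloc/free stand in for the kernel's kzalloc/kfree so the sketch builds as a plain userspace C program.

#include <stdio.h>
#include <stdlib.h>

/* Arch-neutral vcpu: no VMX or SVM members, just an opaque slot. */
struct kvm_vcpu {
	int valid;	/* set once the backend has initialized us */
	void *_priv;	/* backend-private state (vcpu_vmx or vcpu_svm) */
};

/* VMX-private state, invisible to the generic code. */
struct vcpu_vmx {
	struct kvm_vcpu *vcpu;	/* back-pointer to the generic vcpu */
	int launched;
};

/* Mirrors the to_vmx() helper added to vmx.c below. */
static struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return (struct vcpu_vmx *)vcpu->_priv;
}

static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = calloc(1, sizeof(*vmx));

	if (!vmx)
		return -1;
	vmx->vcpu = vcpu;	/* link the two structures both ways */
	vcpu->_priv = vmx;
	vcpu->valid = 1;	/* generic code now tests this, not ->vmcs */
	return 0;
}

int main(void)
{
	struct kvm_vcpu vcpu = { 0 };

	if (vmx_create_vcpu(&vcpu) == 0)
		printf("valid=%d launched=%d\n", vcpu.valid,
		       to_vmx(&vcpu)->launched);
	free(vcpu._priv);
	return 0;
}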
@@ -15,7 +15,6 @@
 #include <linux/mm.h>
 #include <asm/signal.h>
 
-#include "vmx.h"
 #include <linux/kvm.h>
 #include <linux/kvm_para.h>
@@ -140,14 +139,6 @@ struct kvm_mmu_page {
 	};
 };
 
-struct vmcs {
-	u32 revision_id;
-	u32 abort;
-	char data[0];
-};
-
-#define vmx_msr_entry kvm_msr_entry
-
 struct kvm_vcpu;
 
 /*
@@ -309,15 +300,12 @@ void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
 			     struct kvm_io_device *dev);
 
 struct kvm_vcpu {
+	int valid;
 	struct kvm *kvm;
 	int vcpu_id;
-	union {
-		struct vmcs *vmcs;
-		struct vcpu_svm *svm;
-	};
+	void *_priv;
 	struct mutex mutex;
 	int cpu;
-	int launched;
 	u64 host_tsc;
 	struct kvm_run *run;
 	int interrupt_window_open;
@@ -340,14 +328,6 @@ struct kvm_vcpu {
 	u64 shadow_efer;
 	u64 apic_base;
 	u64 ia32_misc_enable_msr;
-	int nmsrs;
-	int save_nmsrs;
-	int msr_offset_efer;
-#ifdef CONFIG_X86_64
-	int msr_offset_kernel_gs_base;
-#endif
-	struct vmx_msr_entry *guest_msrs;
-	struct vmx_msr_entry *host_msrs;
 
 	struct kvm_mmu mmu;
@@ -366,11 +346,6 @@ struct kvm_vcpu {
 	char *guest_fx_image;
 	int fpu_active;
 	int guest_fpu_loaded;
-	struct vmx_host_state {
-		int loaded;
-		u16 fs_sel, gs_sel, ldt_sel;
-		int fs_gs_ldt_reload_needed;
-	} vmx_host_state;
 
 	int mmio_needed;
 	int mmio_read_completed;
@@ -579,8 +554,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
 
 void fx_init(struct kvm_vcpu *vcpu);
 
-void load_msrs(struct vmx_msr_entry *e, int n);
-void save_msrs(struct vmx_msr_entry *e, int n);
 void kvm_resched(struct kvm_vcpu *vcpu);
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
...
@@ -367,7 +367,7 @@ static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
 
 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
 {
-	if (!vcpu->vmcs)
+	if (!vcpu->valid)
 		return;
 
 	vcpu_load(vcpu);
@@ -377,7 +377,7 @@ static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
 
 static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
 {
-	if (!vcpu->vmcs)
+	if (!vcpu->valid)
 		return;
 
 	vcpu_load(vcpu);
@@ -1645,24 +1645,6 @@ void kvm_resched(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_resched);
 
-void load_msrs(struct vmx_msr_entry *e, int n)
-{
-	int i;
-
-	for (i = 0; i < n; ++i)
-		wrmsrl(e[i].index, e[i].data);
-}
-EXPORT_SYMBOL_GPL(load_msrs);
-
-void save_msrs(struct vmx_msr_entry *e, int n)
-{
-	int i;
-
-	for (i = 0; i < n; ++i)
-		rdmsrl(e[i].index, e[i].data);
-}
-EXPORT_SYMBOL_GPL(save_msrs);
-
 void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
 {
 	int i;
@@ -2401,7 +2383,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
 	mutex_lock(&vcpu->mutex);
-	if (vcpu->vmcs) {
+	if (vcpu->valid) {
 		mutex_unlock(&vcpu->mutex);
 		return -EEXIST;
 	}
@@ -2449,6 +2431,8 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
 	kvm->nvcpus = n + 1;
 	spin_unlock(&kvm_lock);
 
+	vcpu->valid = 1;
+
 	return r;
 
 out_free_vcpus:
...
@@ -20,7 +20,10 @@ static const u32 host_save_user_msrs[] = {
 
 #define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
 #define NUM_DB_REGS 4
 
+struct kvm_vcpu;
+
 struct vcpu_svm {
+	struct kvm_vcpu *vcpu;
 	struct vmcb *vmcb;
 	unsigned long vmcb_pa;
 	struct svm_cpu_data *svm_data;
...
This diff is collapsed.
@@ -32,6 +32,37 @@
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
 
+struct vmcs {
+	u32 revision_id;
+	u32 abort;
+	char data[0];
+};
+
+struct vcpu_vmx {
+	struct kvm_vcpu *vcpu;
+	int launched;
+	struct kvm_msr_entry *guest_msrs;
+	struct kvm_msr_entry *host_msrs;
+	int nmsrs;
+	int save_nmsrs;
+	int msr_offset_efer;
+#ifdef CONFIG_X86_64
+	int msr_offset_kernel_gs_base;
+#endif
+	struct vmcs *vmcs;
+	struct {
+		int loaded;
+		u16 fs_sel, gs_sel, ldt_sel;
+		int fs_gs_ldt_reload_needed;
+	} host_state;
+};
+
+static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
+{
+	return (struct vcpu_vmx *)vcpu->_priv;
+}
+
 static int init_rmode_tss(struct kvm *kvm);
 
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
@@ -89,16 +120,33 @@ static const u32 vmx_msr_index[] = {
 };
 
 #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
 
-static inline u64 msr_efer_save_restore_bits(struct vmx_msr_entry msr)
+static void load_msrs(struct kvm_msr_entry *e, int n)
+{
+	int i;
+
+	for (i = 0; i < n; ++i)
+		wrmsrl(e[i].index, e[i].data);
+}
+
+static void save_msrs(struct kvm_msr_entry *e, int n)
+{
+	int i;
+
+	for (i = 0; i < n; ++i)
+		rdmsrl(e[i].index, e[i].data);
+}
+
+static inline u64 msr_efer_save_restore_bits(struct kvm_msr_entry msr)
 {
 	return (u64)msr.data & EFER_SAVE_RESTORE_BITS;
 }
 
 static inline int msr_efer_need_save_restore(struct kvm_vcpu *vcpu)
 {
-	int efer_offset = vcpu->msr_offset_efer;
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	int efer_offset = vmx->msr_offset_efer;
 
-	return msr_efer_save_restore_bits(vcpu->host_msrs[efer_offset]) !=
-		msr_efer_save_restore_bits(vcpu->guest_msrs[efer_offset]);
+	return msr_efer_save_restore_bits(vmx->host_msrs[efer_offset]) !=
+		msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]);
 }
 
 static inline int is_page_fault(u32 intr_info)
@@ -123,21 +171,23 @@ static inline int is_external_interrupt(u32 intr_info)
 
 static int __find_msr_index(struct kvm_vcpu *vcpu, u32 msr)
 {
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	int i;
 
-	for (i = 0; i < vcpu->nmsrs; ++i)
-		if (vcpu->guest_msrs[i].index == msr)
+	for (i = 0; i < vmx->nmsrs; ++i)
+		if (vmx->guest_msrs[i].index == msr)
 			return i;
 	return -1;
 }
 
-static struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
+static struct kvm_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
 {
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	int i;
 
 	i = __find_msr_index(vcpu, msr);
 	if (i >= 0)
-		return &vcpu->guest_msrs[i];
+		return &vmx->guest_msrs[i];
 	return NULL;
 }
@@ -157,11 +207,12 @@ static void vmcs_clear(struct vmcs *vmcs)
 static void __vcpu_clear(void *arg)
 {
 	struct kvm_vcpu *vcpu = arg;
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	int cpu = raw_smp_processor_id();
 
 	if (vcpu->cpu == cpu)
-		vmcs_clear(vcpu->vmcs);
-	if (per_cpu(current_vmcs, cpu) == vcpu->vmcs)
+		vmcs_clear(vmx->vmcs);
+	if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
 		per_cpu(current_vmcs, cpu) = NULL;
 	rdtscll(vcpu->host_tsc);
 }
@@ -172,7 +223,7 @@ static void vcpu_clear(struct kvm_vcpu *vcpu)
 		smp_call_function_single(vcpu->cpu, __vcpu_clear, vcpu, 0, 1);
 	else
 		__vcpu_clear(vcpu);
-	vcpu->launched = 0;
+	to_vmx(vcpu)->launched = 0;
 }
 
 static unsigned long vmcs_readl(unsigned long field)
@@ -285,80 +336,81 @@ static void reload_tss(void)
 static void load_transition_efer(struct kvm_vcpu *vcpu)
 {
 	u64 trans_efer;
-	int efer_offset = vcpu->msr_offset_efer;
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	int efer_offset = vmx->msr_offset_efer;
 
-	trans_efer = vcpu->host_msrs[efer_offset].data;
+	trans_efer = vmx->host_msrs[efer_offset].data;
 	trans_efer &= ~EFER_SAVE_RESTORE_BITS;
-	trans_efer |= msr_efer_save_restore_bits(
-				vcpu->guest_msrs[efer_offset]);
+	trans_efer |= msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]);
 	wrmsrl(MSR_EFER, trans_efer);
 	vcpu->stat.efer_reload++;
 }
 
 static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 {
-	struct vmx_host_state *hs = &vcpu->vmx_host_state;
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-	if (hs->loaded)
+	if (vmx->host_state.loaded)
 		return;
-	hs->loaded = 1;
+	vmx->host_state.loaded = 1;
 
 	/*
 	 * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
 	 * allow segment selectors with cpl > 0 or ti == 1.
 	 */
-	hs->ldt_sel = read_ldt();
-	hs->fs_gs_ldt_reload_needed = hs->ldt_sel;
-	hs->fs_sel = read_fs();
-	if (!(hs->fs_sel & 7))
-		vmcs_write16(HOST_FS_SELECTOR, hs->fs_sel);
+	vmx->host_state.ldt_sel = read_ldt();
+	vmx->host_state.fs_gs_ldt_reload_needed = vmx->host_state.ldt_sel;
+	vmx->host_state.fs_sel = read_fs();
+	if (!(vmx->host_state.fs_sel & 7))
+		vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
 	else {
 		vmcs_write16(HOST_FS_SELECTOR, 0);
-		hs->fs_gs_ldt_reload_needed = 1;
+		vmx->host_state.fs_gs_ldt_reload_needed = 1;
 	}
-	hs->gs_sel = read_gs();
-	if (!(hs->gs_sel & 7))
-		vmcs_write16(HOST_GS_SELECTOR, hs->gs_sel);
+	vmx->host_state.gs_sel = read_gs();
+	if (!(vmx->host_state.gs_sel & 7))
+		vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
 	else {
 		vmcs_write16(HOST_GS_SELECTOR, 0);
-		hs->fs_gs_ldt_reload_needed = 1;
+		vmx->host_state.fs_gs_ldt_reload_needed = 1;
 	}
 
 #ifdef CONFIG_X86_64
 	vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
 	vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
 #else
-	vmcs_writel(HOST_FS_BASE, segment_base(hs->fs_sel));
-	vmcs_writel(HOST_GS_BASE, segment_base(hs->gs_sel));
+	vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
+	vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
 #endif
 
 #ifdef CONFIG_X86_64
 	if (is_long_mode(vcpu)) {
-		save_msrs(vcpu->host_msrs + vcpu->msr_offset_kernel_gs_base, 1);
+		save_msrs(vmx->host_msrs +
+			  vmx->msr_offset_kernel_gs_base, 1);
 	}
 #endif
-	load_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
+	load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
 	if (msr_efer_need_save_restore(vcpu))
 		load_transition_efer(vcpu);
 }
 
 static void vmx_load_host_state(struct kvm_vcpu *vcpu)
 {
-	struct vmx_host_state *hs = &vcpu->vmx_host_state;
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-	if (!hs->loaded)
+	if (!vmx->host_state.loaded)
 		return;
-	hs->loaded = 0;
+	vmx->host_state.loaded = 0;
 
-	if (hs->fs_gs_ldt_reload_needed) {
-		load_ldt(hs->ldt_sel);
-		load_fs(hs->fs_sel);
+	if (vmx->host_state.fs_gs_ldt_reload_needed) {
+		load_ldt(vmx->host_state.ldt_sel);
+		load_fs(vmx->host_state.fs_sel);
 		/*
 		 * If we have to reload gs, we must take care to
 		 * preserve our gs base.
 		 */
 		local_irq_disable();
-		load_gs(hs->gs_sel);
+		load_gs(vmx->host_state.gs_sel);
 #ifdef CONFIG_X86_64
 		wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
 #endif
@@ -366,10 +418,10 @@ static void vmx_load_host_state(struct kvm_vcpu *vcpu)
 		reload_tss();
 	}
-	save_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
-	load_msrs(vcpu->host_msrs, vcpu->save_nmsrs);
+	save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
+	load_msrs(vmx->host_msrs, vmx->save_nmsrs);
 	if (msr_efer_need_save_restore(vcpu))
-		load_msrs(vcpu->host_msrs + vcpu->msr_offset_efer, 1);
+		load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1);
 }
 
 /*
@@ -378,7 +430,8 @@ static void vmx_load_host_state(struct kvm_vcpu *vcpu)
  */
 static void vmx_vcpu_load(struct kvm_vcpu *vcpu)
 {
-	u64 phys_addr = __pa(vcpu->vmcs);
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	u64 phys_addr = __pa(vmx->vmcs);
 	int cpu;
 	u64 tsc_this, delta;
@@ -387,16 +440,16 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu)
 	if (vcpu->cpu != cpu)
 		vcpu_clear(vcpu);
 
-	if (per_cpu(current_vmcs, cpu) != vcpu->vmcs) {
+	if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
 		u8 error;
 
-		per_cpu(current_vmcs, cpu) = vcpu->vmcs;
+		per_cpu(current_vmcs, cpu) = vmx->vmcs;
 		asm volatile (ASM_VMX_VMPTRLD_RAX "; setna %0"
 			      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
 			      : "cc");
 		if (error)
 			printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
-			       vcpu->vmcs, phys_addr);
+			       vmx->vmcs, phys_addr);
 	}
 
 	if (vcpu->cpu != cpu) {
@@ -503,13 +556,15 @@ static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
  */
 void move_msr_up(struct kvm_vcpu *vcpu, int from, int to)
 {
-	struct vmx_msr_entry tmp;
-	tmp = vcpu->guest_msrs[to];
-	vcpu->guest_msrs[to] = vcpu->guest_msrs[from];
-	vcpu->guest_msrs[from] = tmp;
-	tmp = vcpu->host_msrs[to];
-	vcpu->host_msrs[to] = vcpu->host_msrs[from];
-	vcpu->host_msrs[from] = tmp;
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct kvm_msr_entry tmp;
+
+	tmp = vmx->guest_msrs[to];
+	vmx->guest_msrs[to] = vmx->guest_msrs[from];
+	vmx->guest_msrs[from] = tmp;
+	tmp = vmx->host_msrs[to];
+	vmx->host_msrs[to] = vmx->host_msrs[from];
+	vmx->host_msrs[from] = tmp;
 }
 
 /*
@@ -519,6 +574,7 @@ void move_msr_up(struct kvm_vcpu *vcpu, int from, int to)
  */
 static void setup_msrs(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	int save_nmsrs;
 
 	save_nmsrs = 0;
@@ -547,13 +603,13 @@ static void setup_msrs(struct kvm_vcpu *vcpu)
 			move_msr_up(vcpu, index, save_nmsrs++);
 	}
 #endif
-	vcpu->save_nmsrs = save_nmsrs;
+	vmx->save_nmsrs = save_nmsrs;
 
 #ifdef CONFIG_X86_64
-	vcpu->msr_offset_kernel_gs_base =
+	vmx->msr_offset_kernel_gs_base =
 		__find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
 #endif
-	vcpu->msr_offset_efer = __find_msr_index(vcpu, MSR_EFER);
+	vmx->msr_offset_efer = __find_msr_index(vcpu, MSR_EFER);
 }
 
 /*
@@ -589,7 +645,7 @@ static void guest_write_tsc(u64 guest_tsc)
 static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 {
 	u64 data;
-	struct vmx_msr_entry *msr;
+	struct kvm_msr_entry *msr;
 
 	if (!pdata) {
 		printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
@@ -639,14 +695,15 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
  */
 static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 {
-	struct vmx_msr_entry *msr;
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct kvm_msr_entry *msr;
 	int ret = 0;
 
 	switch (msr_index) {
 #ifdef CONFIG_X86_64
 	case MSR_EFER:
 		ret = kvm_set_msr_common(vcpu, msr_index, data);
-		if (vcpu->vmx_host_state.loaded)
+		if (vmx->host_state.loaded)
 			load_transition_efer(vcpu);
 		break;
 	case MSR_FS_BASE:
@@ -672,8 +729,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 		msr = find_msr_entry(vcpu, msr_index);
 		if (msr) {
 			msr->data = data;
-			if (vcpu->vmx_host_state.loaded)
-				load_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
+			if (vmx->host_state.loaded)
+				load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
 			break;
 		}
 		ret = kvm_set_msr_common(vcpu, msr_index, data);
@@ -1053,7 +1110,7 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
-	struct vmx_msr_entry *msr = find_msr_entry(vcpu, MSR_EFER);
+	struct kvm_msr_entry *msr = find_msr_entry(vcpu, MSR_EFER);
 
 	vcpu->shadow_efer = efer;
 	if (efer & EFER_LMA) {
@@ -1244,6 +1301,7 @@ static void seg_setup(int seg)
  */
 static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 host_sysenter_cs;
 	u32 junk;
 	unsigned long a;
@@ -1385,18 +1443,18 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
 		u32 index = vmx_msr_index[i];
 		u32 data_low, data_high;
 		u64 data;
-		int j = vcpu->nmsrs;
+		int j = vmx->nmsrs;
 
 		if (rdmsr_safe(index, &data_low, &data_high) < 0)
 			continue;
 		if (wrmsr_safe(index, data_low, data_high) < 0)
 			continue;
 		data = data_low | ((u64)data_high << 32);
-		vcpu->host_msrs[j].index = index;
-		vcpu->host_msrs[j].reserved = 0;
-		vcpu->host_msrs[j].data = data;
-		vcpu->guest_msrs[j] = vcpu->host_msrs[j];
-		++vcpu->nmsrs;
+		vmx->host_msrs[j].index = index;
+		vmx->host_msrs[j].reserved = 0;
+		vmx->host_msrs[j].data = data;
+		vmx->guest_msrs[j] = vmx->host_msrs[j];
+		++vmx->nmsrs;
 	}
 
 	setup_msrs(vcpu);
@@ -1999,6 +2057,7 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
 static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u8 fail;
 	int r;
@@ -2123,7 +2182,7 @@ static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 #endif
 		"setbe %0 \n\t"
 	      : "=q" (fail)
-	      : "r"(vcpu->launched), "d"((unsigned long)HOST_RSP),
+	      : "r"(vmx->launched), "d"((unsigned long)HOST_RSP),
 		"c"(vcpu),
 		[rax]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RAX])),
 		[rbx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBX])),
@@ -2167,7 +2226,7 @@ static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	if (unlikely(prof_on == KVM_PROFILING))
 		profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP));
 
-	vcpu->launched = 1;
+	vmx->launched = 1;
 	r = kvm_handle_exit(kvm_run, vcpu);
 	if (r > 0) {
 		/* Give scheduler a change to reschedule. */
@@ -2232,10 +2291,12 @@ static void vmx_inject_page_fault(struct kvm_vcpu *vcpu,
 static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
 {
-	if (vcpu->vmcs) {
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	if (vmx->vmcs) {
 		on_each_cpu(__vcpu_clear, vcpu, 0, 1);
-		free_vmcs(vcpu->vmcs);
-		vcpu->vmcs = NULL;
+		free_vmcs(vmx->vmcs);
+		vmx->vmcs = NULL;
 	}
 }
@@ -2246,33 +2307,39 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
 {
-	struct vmcs *vmcs;
+	struct vcpu_vmx *vmx;
 
-	vcpu->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
-	if (!vcpu->guest_msrs)
+	vmx = kzalloc(sizeof(*vmx), GFP_KERNEL);
+	if (!vmx)
 		return -ENOMEM;
 
-	vcpu->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
-	if (!vcpu->host_msrs)
-		goto out_free_guest_msrs;
+	vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!vmx->guest_msrs)
+		goto out_free;
 
-	vmcs = alloc_vmcs();
-	if (!vmcs)
-		goto out_free_msrs;
+	vmx->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!vmx->host_msrs)
+		goto out_free;
 
-	vmcs_clear(vmcs);
-	vcpu->vmcs = vmcs;
-	vcpu->launched = 0;
+	vmx->vmcs = alloc_vmcs();
+	if (!vmx->vmcs)
+		goto out_free;
+
+	vmcs_clear(vmx->vmcs);
+
+	vmx->vcpu = vcpu;
+	vcpu->_priv = vmx;
 
 	return 0;
 
-out_free_msrs:
-	kfree(vcpu->host_msrs);
-	vcpu->host_msrs = NULL;
+out_free:
+	if (vmx->host_msrs)
+		kfree(vmx->host_msrs);
 
-out_free_guest_msrs:
-	kfree(vcpu->guest_msrs);
-	vcpu->guest_msrs = NULL;
+	if (vmx->guest_msrs)
+		kfree(vmx->guest_msrs);
+
+	kfree(vmx);
 
 	return -ENOMEM;
 }
...
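One detail worth noting in the reworked vmx_create_vcpu() above: because kzalloc() returns zeroed memory, all three failure paths can share a single out_free label that releases whatever was actually allocated, replacing the old pair of ordered unwind labels. A minimal userspace sketch of that single-exit idiom follows; the struct and create() names are illustrative only, with calloc/free standing in for kzalloc/kfree.

#include <stdlib.h>

struct vcpu_vmx_like {
	void *guest_msrs;
	void *host_msrs;
	void *vmcs;
};

/* Allocate everything or nothing; all failures funnel through one label. */
static struct vcpu_vmx_like *create(size_t size)
{
	struct vcpu_vmx_like *v = calloc(1, sizeof(*v)); /* zeroed, like kzalloc() */

	if (!v)
		return NULL;
	v->guest_msrs = malloc(size);
	if (!v->guest_msrs)
		goto out_free;
	v->host_msrs = malloc(size);
	if (!v->host_msrs)
		goto out_free;
	v->vmcs = malloc(size);
	if (!v->vmcs)
		goto out_free;
	return v;

out_free:
	/* Members never allocated are still NULL, and free(NULL) is a
	 * no-op, just as kfree(NULL) is in the kernel. */
	free(v->guest_msrs);
	free(v->host_msrs);
	free(v);
	return NULL;
}

int main(void)
{
	struct vcpu_vmx_like *v = create(4096);

	if (v) {
		free(v->guest_msrs);
		free(v->host_msrs);
		free(v->vmcs);
		free(v);
	}
	return 0;
}

Since kfree(NULL) is a no-op, the NULL checks the patch keeps before its kfree() calls are defensive rather than required.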