Commit b114b080 authored by Rusty Russell, committed by Avi Kivity

KVM: Use alignment properties of vcpu to simplify FPU ops

Now that we use a kmem cache for allocating vcpus, we can get the
16-byte alignment required by the fxsave and fxrstor instructions, and
avoid manually aligning the buffer.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Parent c16f862d
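The vcpu objects already come from a kmem cache (that switch precedes this patch), so it is the allocator, not the FPU code, that provides the 16-byte alignment fxsave/fxrstor need. Below is a minimal sketch of that idea, not code from this commit or its parent: it uses the current kernel's five-argument kmem_cache_create(), and example_vcpu, example_vcpu_cache and struct fx_image are hypothetical stand-ins for struct kvm_vcpu, the real vcpu cache and struct i387_fxsave_struct.

/* Sketch only -- illustrates the alignment property this patch relies on. */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/types.h>

/* Hypothetical stand-in for the 512-byte, 16-byte-aligned fxsave image. */
struct fx_image {
        u8 data[512];
} __attribute__((aligned(16)));

/* Hypothetical cut-down vcpu: only the FPU images matter here. */
struct example_vcpu {
        struct fx_image host_fx_image;
        struct fx_image guest_fx_image;
};

static struct kmem_cache *example_vcpu_cache;

static int __init example_cache_init(void)
{
        /*
         * Creating the cache with the structure's natural alignment
         * (16 here, because of the aligned members) means every object
         * it hands out can be passed straight to fxsave/fxrstor --
         * no char buffer plus manual ALIGN() needed.
         */
        example_vcpu_cache = kmem_cache_create("example_vcpu",
                                               sizeof(struct example_vcpu),
                                               __alignof__(struct example_vcpu),
                                               0, NULL);
        return example_vcpu_cache ? 0 : -ENOMEM;
}

static struct example_vcpu *example_vcpu_alloc(void)
{
        struct example_vcpu *vcpu = kmem_cache_zalloc(example_vcpu_cache,
                                                      GFP_KERNEL);

        if (!vcpu)
                return NULL;

        /* The same sanity check this patch adds before the first fxsave. */
        BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF);
        return vcpu;
}

With that guarantee in place, the diff below can drop fx_buf, FX_BUF_SIZE and the ALIGN() arithmetic and embed the two struct i387_fxsave_struct images directly in struct kvm_vcpu.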
@@ -45,10 +45,6 @@
 #define KVM_REFILL_PAGES 25
 #define KVM_MAX_CPUID_ENTRIES 40
 
-#define FX_IMAGE_SIZE 512
-#define FX_IMAGE_ALIGN 16
-#define FX_BUF_SIZE (2 * FX_IMAGE_SIZE + FX_IMAGE_ALIGN)
-
 #define DE_VECTOR 0
 #define NM_VECTOR 7
 #define DF_VECTOR 8
@@ -342,9 +338,8 @@ struct kvm_vcpu {
         struct kvm_guest_debug guest_debug;
 
-        char fx_buf[FX_BUF_SIZE];
-        char *host_fx_image;
-        char *guest_fx_image;
+        struct i387_fxsave_struct host_fx_image;
+        struct i387_fxsave_struct guest_fx_image;
 
         int fpu_active;
         int guest_fpu_loaded;
@@ -704,12 +699,12 @@ static inline unsigned long read_msr(unsigned long msr)
 }
 #endif
 
-static inline void fx_save(void *image)
+static inline void fx_save(struct i387_fxsave_struct *image)
 {
         asm ("fxsave (%0)":: "r" (image));
 }
 
-static inline void fx_restore(void *image)
+static inline void fx_restore(struct i387_fxsave_struct *image)
 {
         asm ("fxrstor (%0)":: "r" (image));
 }

...
@@ -154,8 +154,8 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
                 return;
 
         vcpu->guest_fpu_loaded = 1;
-        fx_save(vcpu->host_fx_image);
-        fx_restore(vcpu->guest_fx_image);
+        fx_save(&vcpu->host_fx_image);
+        fx_restore(&vcpu->guest_fx_image);
 }
 EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
@@ -165,8 +165,8 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
                 return;
 
         vcpu->guest_fpu_loaded = 0;
-        fx_save(vcpu->guest_fx_image);
-        fx_restore(vcpu->host_fx_image);
+        fx_save(&vcpu->guest_fx_image);
+        fx_restore(&vcpu->host_fx_image);
 }
 EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
@@ -262,10 +262,6 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
         }
         vcpu->pio_data = page_address(page);
 
-        vcpu->host_fx_image = (char*)ALIGN((hva_t)vcpu->fx_buf,
-                                           FX_IMAGE_ALIGN);
-        vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;
-
         r = kvm_mmu_create(vcpu);
         if (r < 0)
                 goto fail_free_pio_data;
@@ -615,30 +611,20 @@ EXPORT_SYMBOL_GPL(set_cr8);
 void fx_init(struct kvm_vcpu *vcpu)
 {
-        struct __attribute__ ((__packed__)) fx_image_s {
-                u16 control; //fcw
-                u16 status; //fsw
-                u16 tag; // ftw
-                u16 opcode; //fop
-                u64 ip; // fpu ip
-                u64 operand;// fpu dp
-                u32 mxcsr;
-                u32 mxcsr_mask;
-        } *fx_image;
+        unsigned after_mxcsr_mask;
 
         /* Initialize guest FPU by resetting ours and saving into guest's */
         preempt_disable();
-        fx_save(vcpu->host_fx_image);
+        fx_save(&vcpu->host_fx_image);
         fpu_init();
-        fx_save(vcpu->guest_fx_image);
-        fx_restore(vcpu->host_fx_image);
+        fx_save(&vcpu->guest_fx_image);
+        fx_restore(&vcpu->host_fx_image);
         preempt_enable();
 
-        fx_image = (struct fx_image_s *)vcpu->guest_fx_image;
-        fx_image->mxcsr = 0x1f80;
-        memset(vcpu->guest_fx_image + sizeof(struct fx_image_s),
-               0, FX_IMAGE_SIZE - sizeof(struct fx_image_s));
+        after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
+        vcpu->guest_fx_image.mxcsr = 0x1f80;
+        memset((void *)&vcpu->guest_fx_image + after_mxcsr_mask,
+               0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
 }
 EXPORT_SYMBOL_GPL(fx_init);
@@ -2356,6 +2342,9 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
         preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
 
+        /* We do fxsave: this must be aligned. */
+        BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF);
+
         vcpu_load(vcpu);
         r = kvm_mmu_setup(vcpu);
         vcpu_put(vcpu);
@@ -2468,7 +2457,7 @@ struct fxsave {
 static int kvm_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
-        struct fxsave *fxsave = (struct fxsave *)vcpu->guest_fx_image;
+        struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
 
         vcpu_load(vcpu);
@@ -2488,7 +2477,7 @@ static int kvm_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 static int kvm_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
-        struct fxsave *fxsave = (struct fxsave *)vcpu->guest_fx_image;
+        struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
 
         vcpu_load(vcpu);

...
@@ -1557,8 +1557,8 @@ static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         }
 
         if (vcpu->fpu_active) {
-                fx_save(vcpu->host_fx_image);
-                fx_restore(vcpu->guest_fx_image);
+                fx_save(&vcpu->host_fx_image);
+                fx_restore(&vcpu->guest_fx_image);
         }
 
         asm volatile (
@@ -1670,8 +1670,8 @@ static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         vcpu->guest_mode = 0;
 
         if (vcpu->fpu_active) {
-                fx_save(vcpu->guest_fx_image);
-                fx_restore(vcpu->host_fx_image);
+                fx_save(&vcpu->guest_fx_image);
+                fx_restore(&vcpu->host_fx_image);
         }
 
         if ((svm->vmcb->save.dr7 & 0xff))

...