Commit dacccfdd authored by Avi Kivity

KVM: SVM: Move fs/gs/ldt save/restore to heavyweight exit path

ldt is never used in the kernel context; same goes for fs (x86_64) and gs
(i386).  So save/restore them in the heavyweight exit path instead
of the lightweight path.

By itself, this doesn't buy us much, but it paves the way for moving vmload
and vmsave to the heavyweight exit path, since they modify the same registers.
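As a rough illustration, here is a condensed sketch assembled from the hunks below (unrelated parts of each function elided): host segment and LDT state is now saved once in svm_vcpu_load() and restored once in svm_vcpu_put(), so svm_vcpu_run() no longer touches it on every guest entry/exit.

/* Heavyweight path: entered only when the vcpu is scheduled in or out. */
static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/* ... unrelated load logic elided ... */
	save_host_msrs(vcpu);
	savesegment(fs, svm->host.fs);	/* fs is unused by the x86_64 kernel */
	savesegment(gs, svm->host.gs);	/* gs is unused by the i386 kernel */
	svm->host.ldt = kvm_read_ldt();	/* ldt is never used in kernel context */
	/* ... */
}

static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/* ... */
	kvm_load_ldt(svm->host.ldt);
#ifdef CONFIG_X86_64
	loadsegment(fs, svm->host.fs);
	load_gs_index(svm->host.gs);
	wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
#else
	loadsegment(gs, svm->host.gs);
#endif
	/* ... */
}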

[jan: fix copy/paste mistake on i386]
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Parent: afe9e66f
@@ -125,6 +125,9 @@ struct vcpu_svm {
 	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
 	struct {
+		u16 fs;
+		u16 gs;
+		u16 ldt;
 		u64 gs_base;
 	} host;
@@ -184,6 +187,9 @@ static int nested_svm_vmexit(struct vcpu_svm *svm);
 static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
 				      bool has_error_code, u32 error_code);
+static void save_host_msrs(struct kvm_vcpu *vcpu);
+static void load_host_msrs(struct kvm_vcpu *vcpu);
 static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
 {
 	return container_of(vcpu, struct vcpu_svm, vcpu);
@@ -996,6 +1002,11 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		svm->asid_generation = 0;
 	}
+	save_host_msrs(vcpu);
+	savesegment(fs, svm->host.fs);
+	savesegment(gs, svm->host.gs);
+	svm->host.ldt = kvm_read_ldt();
 	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
 		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 }
@@ -1006,6 +1017,14 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
 	int i;
 	++vcpu->stat.host_state_reload;
+	kvm_load_ldt(svm->host.ldt);
+#ifdef CONFIG_X86_64
+	loadsegment(fs, svm->host.fs);
+	load_gs_index(svm->host.gs);
+	wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
+#else
+	loadsegment(gs, svm->host.gs);
+#endif
 	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
 		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 }
@@ -3314,9 +3333,6 @@ static void svm_cancel_injection(struct kvm_vcpu *vcpu)
 static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
-	u16 fs_selector;
-	u16 gs_selector;
-	u16 ldt_selector;
 	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
 	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
@@ -3333,10 +3349,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	sync_lapic_to_cr8(vcpu);
-	save_host_msrs(vcpu);
-	savesegment(fs, fs_selector);
-	savesegment(gs, gs_selector);
-	ldt_selector = kvm_read_ldt();
 	svm->vmcb->save.cr2 = vcpu->arch.cr2;
 	clgi();
@@ -3415,13 +3427,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	);
 	load_host_msrs(vcpu);
-	kvm_load_ldt(ldt_selector);
-	loadsegment(fs, fs_selector);
-#ifdef CONFIG_X86_64
-	load_gs_index(gs_selector);
-	wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
-#else
-	loadsegment(gs, gs_selector);
+#ifndef CONFIG_X86_64
+	loadsegment(fs, svm->host.fs);
 #endif
 	reload_tss(vcpu);