Commit 42b933b5 authored by Vitaly Kuznetsov, committed by Paolo Bonzini

x86/kvm/vmx: read MSR_{FS,KERNEL_GS}_BASE from current->thread

vmx_save_host_state() is only called from kvm_arch_vcpu_ioctl_run(), so
the context is pretty well defined. Read MSR_{FS,KERNEL_GS}_BASE from
current->thread after calling save_fsgs(), which now takes care of the
X86_BUG_NULL_SEG case and will do RD{FS,GS}BASE when FSGSBASE
extensions are exposed to userspace (currently they are not).
Acked-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent b31c114b
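In essence, the x86_64 path trades two RDMSRs per call for values already cached in current->thread. Condensed from the removed and added lines in the diff below (a sketch for orientation, not a standalone excerpt of vmx.c):

	/* Before: FS base and the user GS base were read back via RDMSR */
	vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
	rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);

	/* After: save_fsgs_for_kvm() has refreshed current->thread */
	vmcs_writel(HOST_FS_BASE, current->thread.fsbase);
	vmx->msr_host_kernel_gs_base = current->thread.gsbase;

HOST_GS_BASE is still taken from read_msr(MSR_GS_BASE), since the kernel's per-CPU GS base has no counterpart in current->thread.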
@@ -410,6 +410,11 @@ DECLARE_INIT_PER_CPU(irq_stack_union);
 DECLARE_PER_CPU(char *, irq_stack_ptr);
 DECLARE_PER_CPU(unsigned int, irq_count);
 extern asmlinkage void ignore_sysret(void);
+
+#if IS_ENABLED(CONFIG_KVM)
+/* Save actual FS/GS selectors and bases to current->thread */
+void save_fsgs_for_kvm(void);
+#endif
 #else	/* X86_64 */
 #ifdef CONFIG_CC_STACKPROTECTOR
 /*
......
@@ -205,6 +205,20 @@ static __always_inline void save_fsgs(struct task_struct *task)
 	save_base_legacy(task, task->thread.gsindex, GS);
 }
 
+#if IS_ENABLED(CONFIG_KVM)
+/*
+ * While a process is running, current->thread.fsbase and current->thread.gsbase
+ * may not match the corresponding CPU registers (see save_base_legacy()). KVM
+ * wants an efficient way to save and restore FSBASE and GSBASE.
+ * When FSGSBASE extensions are enabled, this will have to use RD{FS,GS}BASE.
+ */
+void save_fsgs_for_kvm(void)
+{
+	save_fsgs(current);
+}
+EXPORT_SYMBOL_GPL(save_fsgs_for_kvm);
+#endif
+
 static __always_inline void loadseg(enum which_selector which,
 				    unsigned short sel)
 {
......
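The comment above anticipates switching to RD{FS,GS}BASE once the FSGSBASE extensions are usable. As a purely hypothetical sketch of that direction (not part of this patch; the helper name is made up, and real kernel code would gate this on a CPU-feature check and assembler support), reading the FS base with the new instruction could look like:

	/* Hypothetical: read FSBASE directly instead of trusting the cached value. */
	static inline unsigned long sketch_rdfsbase(void)
	{
		unsigned long base;

		asm volatile("rdfsbase %0" : "=r" (base));
		return base;
	}

Note that in kernel context RDGSBASE returns the kernel's per-CPU GS base, not the user's, so the GS side would still need MSR_KERNEL_GS_BASE (or a SWAPGS dance); the sketch therefore covers FS only.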
@@ -2165,7 +2165,15 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 	 */
 	vmx->host_state.ldt_sel = kvm_read_ldt();
 	vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
+
+#ifdef CONFIG_X86_64
+	save_fsgs_for_kvm();
+	vmx->host_state.fs_sel = current->thread.fsindex;
+	vmx->host_state.gs_sel = current->thread.gsindex;
+#else
 	savesegment(fs, vmx->host_state.fs_sel);
+	savesegment(gs, vmx->host_state.gs_sel);
+#endif
 	if (!(vmx->host_state.fs_sel & 7)) {
 		vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
 		vmx->host_state.fs_reload_needed = 0;
@@ -2173,7 +2181,6 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 		vmcs_write16(HOST_FS_SELECTOR, 0);
 		vmx->host_state.fs_reload_needed = 1;
 	}
-	savesegment(gs, vmx->host_state.gs_sel);
 	if (!(vmx->host_state.gs_sel & 7))
 		vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
 	else {
@@ -2187,7 +2194,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 #endif
 
 #ifdef CONFIG_X86_64
-	vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
+	vmcs_writel(HOST_FS_BASE, current->thread.fsbase);
 	vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
 #else
 	vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
@@ -2195,7 +2202,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 #endif
 
 #ifdef CONFIG_X86_64
-	rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
+	vmx->msr_host_kernel_gs_base = current->thread.gsbase;
 	if (is_long_mode(&vmx->vcpu))
 		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
 #endif
......
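Putting the vmx.c hunks back together, the x86_64 path of vmx_save_host_state() reads roughly as follows after the patch (condensed from the diff above; the selector validity checks and the 32-bit path are omitted):

	#ifdef CONFIG_X86_64
		/* Refresh current->thread.{fs,gs}{index,base}, then use the cached values. */
		save_fsgs_for_kvm();
		vmx->host_state.fs_sel = current->thread.fsindex;
		vmx->host_state.gs_sel = current->thread.gsindex;

		vmcs_writel(HOST_FS_BASE, current->thread.fsbase);
		vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));	/* per-CPU kernel GS base */

		vmx->msr_host_kernel_gs_base = current->thread.gsbase;
		if (is_long_mode(&vmx->vcpu))
			wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
	#endif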