Commit 5897297b authored by Avi Kivity

KVM: VMX: Don't intercept MSR_KERNEL_GS_BASE

Windows 2008 accesses this MSR often on context switch intensive workloads;
since we run in guest context with the guest MSR value loaded (so swapgs can
work correctly), we can simply disable interception of rdmsr/wrmsr for this
MSR.

A complication occurs since in legacy mode, we run with the host MSR value
loaded. In this case we enable interception.  This means we need two MSR
bitmaps, one for legacy mode and one for long mode.
Signed-off-by: Avi Kivity <avi@redhat.com>
Parent 3e7c73e9
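For context, the mechanism this patch leans on is the VMX MSR bitmap: a 4 KiB page, pointed to by the VMCS MSR_BITMAP field, whose bits select which guest RDMSR/WRMSR accesses cause a vmexit. The sketch below is an illustration only, assuming the Intel SDM bitmap layout; the helper name msr_bitmap_allow and the plain byte-array interface are made up for this example, while the equivalent in-tree bit clearing is done by the helper the patch renames to __vmx_disable_intercept_for_msr.

#include <stdint.h>

/*
 * Illustrative sketch (not kernel code): clear the read and write intercept
 * bits for one MSR in a 4 KiB VMX MSR bitmap.  A clear bit means the guest's
 * RDMSR/WRMSR of that MSR no longer exits when "use MSR bitmaps" is enabled.
 */
static void msr_bitmap_allow(uint8_t bitmap[4096], uint32_t msr)
{
	uint32_t bit, read_off, write_off;

	if (msr <= 0x1fff) {				/* MSRs 0x00000000-0x00001fff */
		bit       = msr;
		read_off  = 0x000;			/* read bitmap, low range   */
		write_off = 0x800;			/* write bitmap, low range  */
	} else if (msr >= 0xc0000000 && msr <= 0xc0001fff) {
		bit       = msr & 0x1fff;		/* e.g. MSR_KERNEL_GS_BASE, 0xc0000102 */
		read_off  = 0x400;			/* read bitmap, high range  */
		write_off = 0xc00;			/* write bitmap, high range */
	} else {
		return;					/* MSR not covered by the bitmap */
	}

	bitmap[read_off  + bit / 8] &= ~(uint8_t)(1u << (bit % 8));
	bitmap[write_off + bit / 8] &= ~(uint8_t)(1u << (bit % 8));
}

With two such bitmaps, the long-mode bitmap additionally leaves MSR_KERNEL_GS_BASE unintercepted, the legacy bitmap keeps intercepting it (since the host value is loaded there), and setup_msrs() points MSR_BITMAP at whichever bitmap matches the guest's current mode.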
@@ -113,7 +113,8 @@ static DEFINE_PER_CPU(struct list_head, vcpus_on_cpu);
 static unsigned long *vmx_io_bitmap_a;
 static unsigned long *vmx_io_bitmap_b;
-static unsigned long *vmx_msr_bitmap;
+static unsigned long *vmx_msr_bitmap_legacy;
+static unsigned long *vmx_msr_bitmap_longmode;
 
 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
 static DEFINE_SPINLOCK(vmx_vpid_lock);
@@ -812,6 +813,7 @@ static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
 static void setup_msrs(struct vcpu_vmx *vmx)
 {
 	int save_nmsrs;
+	unsigned long *msr_bitmap;
 
 	vmx_load_host_state(vmx);
 	save_nmsrs = 0;
@@ -847,6 +849,15 @@ static void setup_msrs(struct vcpu_vmx *vmx)
 			__find_msr_index(vmx, MSR_KERNEL_GS_BASE);
 #endif
 	vmx->msr_offset_efer = __find_msr_index(vmx, MSR_EFER);
+
+	if (cpu_has_vmx_msr_bitmap()) {
+		if (is_long_mode(&vmx->vcpu))
+			msr_bitmap = vmx_msr_bitmap_longmode;
+		else
+			msr_bitmap = vmx_msr_bitmap_legacy;
+
+		vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
+	}
 }
 
 /*
@@ -2082,7 +2093,7 @@ static void allocate_vpid(struct vcpu_vmx *vmx)
 	spin_unlock(&vmx_vpid_lock);
 }
 
-static void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr)
+static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr)
 {
 	int f = sizeof(unsigned long);
 
@@ -2104,6 +2115,13 @@ static void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr)
 	}
 }
 
+static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
+{
+	if (!longmode_only)
+		__vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy, msr);
+	__vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode, msr);
+}
+
 /*
  * Sets up the vmcs for emulated real mode.
  */
@@ -2123,7 +2141,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	vmcs_write64(IO_BITMAP_B, __pa(vmx_io_bitmap_b));
 
 	if (cpu_has_vmx_msr_bitmap())
-		vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap));
+		vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy));
 
 	vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
@@ -3705,12 +3723,18 @@ static int __init vmx_init(void)
 		goto out;
 	}
 
-	vmx_msr_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
-	if (!vmx_msr_bitmap) {
+	vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL);
+	if (!vmx_msr_bitmap_legacy) {
 		r = -ENOMEM;
 		goto out1;
 	}
 
+	vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
+	if (!vmx_msr_bitmap_longmode) {
+		r = -ENOMEM;
+		goto out2;
+	}
+
 	/*
 	 * Allow direct access to the PC debug port (it is often used for I/O
 	 * delays, but the vmexits simply slow things down).
@@ -3720,19 +3744,21 @@ static int __init vmx_init(void)
 	memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);
 
-	memset(vmx_msr_bitmap, 0xff, PAGE_SIZE);
+	memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
+	memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
 
 	set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
 
 	r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
 	if (r)
-		goto out2;
+		goto out3;
 
-	vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_FS_BASE);
-	vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_GS_BASE);
-	vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_CS);
-	vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_ESP);
-	vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_EIP);
+	vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
+	vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
+	vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
+	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
+	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
+	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
 
 	if (vm_need_ept()) {
 		bypass_guest_pf = 0;
@@ -3752,8 +3778,10 @@ static int __init vmx_init(void)
 	return 0;
 
+out3:
+	free_page((unsigned long)vmx_msr_bitmap_longmode);
 out2:
-	free_page((unsigned long)vmx_msr_bitmap);
+	free_page((unsigned long)vmx_msr_bitmap_legacy);
 out1:
 	free_page((unsigned long)vmx_io_bitmap_b);
 out:
@@ -3763,7 +3791,8 @@ static int __init vmx_init(void)
 static void __exit vmx_exit(void)
 {
-	free_page((unsigned long)vmx_msr_bitmap);
+	free_page((unsigned long)vmx_msr_bitmap_legacy);
+	free_page((unsigned long)vmx_msr_bitmap_longmode);
 	free_page((unsigned long)vmx_io_bitmap_b);
 	free_page((unsigned long)vmx_io_bitmap_a);