Commit 25c5f225 authored by Sheng Yang, committed by Avi Kivity

KVM: VMX: Enable MSR Bitmap feature

The MSR bitmap controls whether an access to a given MSR causes a VM exit.
Eliminating exits on automatically saved and restored MSRs yields a
small performance gain.
Signed-off-by: Sheng Yang <sheng.yang@intel.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Parent e976a2b9
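
For orientation before the diff: the MSR bitmap is a single 4 KiB page divided into four 1 KiB regions, covering reads of MSRs 0x00000000-0x00001fff (offset 0x000), reads of 0xc0000000-0xc0001fff (offset 0x400), writes of the low range (offset 0x800), and writes of the high range (offset 0xc00). A set bit makes the access exit; a clear bit lets it through. Below is a minimal userspace sketch of the index arithmetic that the new vmx_disable_intercept_for_msr() performs; the helper name and enum are hypothetical, not part of the patch.

/*
 * Hypothetical userspace sketch, not part of the patch: compute the
 * bit index into the 4 KiB MSR bitmap page for a given MSR and access
 * direction, mirroring the layout vmx_disable_intercept_for_msr() uses.
 */
#include <stdint.h>

enum msr_access { MSR_ACCESS_READ, MSR_ACCESS_WRITE };

/* Returns the bit offset within the page, or -1 if the MSR lies outside
 * the two controllable ranges (such accesses always cause a VM exit). */
static int msr_bitmap_bit(uint32_t msr, enum msr_access dir)
{
	int base;	/* byte offset of the 1 KiB sub-bitmap */

	if (msr <= 0x1fff)	/* low range: 0x00000000 - 0x00001fff */
		base = (dir == MSR_ACCESS_READ) ? 0x000 : 0x800;
	else if (msr >= 0xc0000000 && msr <= 0xc0001fff)
		base = (dir == MSR_ACCESS_READ) ? 0x400 : 0xc00;
	else
		return -1;

	return base * 8 + (int)(msr & 0x1fff);	/* bytes -> bits, plus index */
}

For example, a guest read of MSR_FS_BASE (0xc0000100) lands on bit 0x400 * 8 + 0x100 = 8448; clearing that bit lets the read proceed without a VM exit.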
@@ -91,6 +91,7 @@ static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
 
 static struct page *vmx_io_bitmap_a;
 static struct page *vmx_io_bitmap_b;
+static struct page *vmx_msr_bitmap;
 
 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
 static DEFINE_SPINLOCK(vmx_vpid_lock);
@@ -185,6 +186,11 @@ static inline int is_external_interrupt(u32 intr_info)
 		== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
 }
 
+static inline int cpu_has_vmx_msr_bitmap(void)
+{
+	return (vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS);
+}
+
 static inline int cpu_has_vmx_tpr_shadow(void)
 {
 	return (vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW);
@@ -1001,6 +1007,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 	      CPU_BASED_MOV_DR_EXITING |
 	      CPU_BASED_USE_TSC_OFFSETING;
 	opt = CPU_BASED_TPR_SHADOW |
+	      CPU_BASED_USE_MSR_BITMAPS |
 	      CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
 	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
 				&_cpu_based_exec_control) < 0)
@@ -1575,6 +1582,30 @@ static void allocate_vpid(struct vcpu_vmx *vmx)
 	spin_unlock(&vmx_vpid_lock);
 }
 
+void vmx_disable_intercept_for_msr(struct page *msr_bitmap, u32 msr)
+{
+	void *va;
+
+	if (!cpu_has_vmx_msr_bitmap())
+		return;
+
+	/*
+	 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
+	 * have the write-low and read-high bitmap offsets the wrong way round.
+	 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
+	 */
+	va = kmap(msr_bitmap);
+	if (msr <= 0x1fff) {
+		__clear_bit(msr, va + 0x000); /* read-low */
+		__clear_bit(msr, va + 0x800); /* write-low */
+	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
+		msr &= 0x1fff;
+		__clear_bit(msr, va + 0x400); /* read-high */
+		__clear_bit(msr, va + 0xc00); /* write-high */
+	}
+	kunmap(msr_bitmap);
+}
+
 /*
  * Sets up the vmcs for emulated real mode.
  */
@@ -1592,6 +1623,9 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	vmcs_write64(IO_BITMAP_A, page_to_phys(vmx_io_bitmap_a));
 	vmcs_write64(IO_BITMAP_B, page_to_phys(vmx_io_bitmap_b));
 
+	if (cpu_has_vmx_msr_bitmap())
+		vmcs_write64(MSR_BITMAP, page_to_phys(vmx_msr_bitmap));
+
 	vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
 
 	/* Control */
@@ -2728,7 +2762,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 
 static int __init vmx_init(void)
 {
-	void *iova;
+	void *va;
 	int r;
 
 	vmx_io_bitmap_a = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
@@ -2741,30 +2775,48 @@ static int __init vmx_init(void)
 		goto out;
 	}
 
+	vmx_msr_bitmap = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
+	if (!vmx_msr_bitmap) {
+		r = -ENOMEM;
+		goto out1;
+	}
+
 	/*
 	 * Allow direct access to the PC debug port (it is often used for I/O
 	 * delays, but the vmexits simply slow things down).
 	 */
-	iova = kmap(vmx_io_bitmap_a);
-	memset(iova, 0xff, PAGE_SIZE);
-	clear_bit(0x80, iova);
+	va = kmap(vmx_io_bitmap_a);
+	memset(va, 0xff, PAGE_SIZE);
+	clear_bit(0x80, va);
 	kunmap(vmx_io_bitmap_a);
 
-	iova = kmap(vmx_io_bitmap_b);
-	memset(iova, 0xff, PAGE_SIZE);
+	va = kmap(vmx_io_bitmap_b);
+	memset(va, 0xff, PAGE_SIZE);
 	kunmap(vmx_io_bitmap_b);
 
+	va = kmap(vmx_msr_bitmap);
+	memset(va, 0xff, PAGE_SIZE);
+	kunmap(vmx_msr_bitmap);
+
 	set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
 
 	r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
 	if (r)
-		goto out1;
+		goto out2;
 
+	vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_FS_BASE);
+	vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_GS_BASE);
+	vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_CS);
+	vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_ESP);
+	vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_EIP);
+
 	if (bypass_guest_pf)
 		kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull);
 
 	return 0;
 
+out2:
+	__free_page(vmx_msr_bitmap);
 out1:
 	__free_page(vmx_io_bitmap_b);
 out:
@@ -2774,6 +2826,7 @@ static int __init vmx_init(void)
 
 static void __exit vmx_exit(void)
 {
+	__free_page(vmx_msr_bitmap);
 	__free_page(vmx_io_bitmap_b);
 	__free_page(vmx_io_bitmap_a);
......
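
A note on the five MSRs made pass-through in vmx_init(): FS_BASE, GS_BASE, and the SYSENTER CS/ESP/EIP MSRs are all part of the VMCS guest-state area that the hardware saves and restores on every VM entry and exit, so KVM gains nothing from intercepting guest accesses to them. As a sanity check of the layout described above, here is a standalone userspace sketch (the bm_clear/bm_test helpers are made up; plain byte arithmetic stands in for the kernel's __clear_bit()): start from an all-ones page, clear the read and write bits for MSR_FS_BASE the way the patch does, and confirm the expected bit positions went to zero.

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define MSR_FS_BASE 0xc0000100u

static void bm_clear(uint8_t *bm, unsigned int bit)
{
	bm[bit / 8] &= (uint8_t)~(1u << (bit % 8));
}

static int bm_test(const uint8_t *bm, unsigned int bit)
{
	return (bm[bit / 8] >> (bit % 8)) & 1;
}

int main(void)
{
	uint8_t page[4096];
	unsigned int idx = MSR_FS_BASE & 0x1fff;	/* index within the high range */

	memset(page, 0xff, sizeof(page));	/* default: intercept everything */
	bm_clear(page + 0x400, idx);		/* read-high  */
	bm_clear(page + 0xc00, idx);		/* write-high */

	assert(bm_test(page, 0x400 * 8 + idx) == 0);	/* read passes through  */
	assert(bm_test(page, 0xc00 * 8 + idx) == 0);	/* write passes through */
	return 0;
}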