Commit 7b6f8a06 authored by Kai Huang, committed by Paolo Bonzini

kvm: x86: Move kvm_set_mmio_spte_mask() from x86.c to mmu.c

This is a prerequisite for fixing several SPTE reserved-bit calculation
errors caused by MKTME; the fix requires kvm_set_mmio_spte_mask() to use
a local static variable defined in mmu.c.

Also move the call site of kvm_set_mmio_spte_mask() from kvm_arch_init() to
kvm_mmu_module_init() so that kvm_set_mmio_spte_mask() can be static.
Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Kai Huang <kai.huang@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent f8d221d2
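For context, a sketch of the shape the follow-up MKTME fix wants once the helper lives in mmu.c (this is not part of this commit; `mmu_phys_bits` is a hypothetical name for the mmu.c-local static variable the commit message alludes to, and the body mirrors the function in the diff below):

static u8 mmu_phys_bits;	/* hypothetical file-local state in mmu.c */

static void kvm_set_mmio_spte_mask(void)
{
	u64 mask = 1ull << 51;	/* reserved while phys width < 52 */

	mask |= 1ull;		/* present bit */
	if (IS_ENABLED(CONFIG_X86_64) && mmu_phys_bits == 52)
		mask &= ~1ull;	/* bit 51 not reserved: disable MMIO #PF */

	kvm_mmu_set_mmio_spte_mask(mask, mask);
}

Being static, the helper can read such file-local state directly instead of having it exported from, or recomputed in, x86.c.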
arch/x86/kvm/mmu.c
@@ -5998,6 +5998,35 @@ static void mmu_destroy_caches(void)
 	kmem_cache_destroy(mmu_page_header_cache);
 }
 
+static void kvm_set_mmio_spte_mask(void)
+{
+	u64 mask;
+	int maxphyaddr = boot_cpu_data.x86_phys_bits;
+
+	/*
+	 * Set the reserved bits and the present bit of a paging-structure
+	 * entry to generate page fault with PFER.RSV = 1.
+	 */
+
+	/*
+	 * Mask the uppermost physical address bit, which would be reserved as
+	 * long as the supported physical address width is less than 52.
+	 */
+	mask = 1ull << 51;
+
+	/* Set the present bit. */
+	mask |= 1ull;
+
+	/*
+	 * If reserved bit is not supported, clear the present bit to disable
+	 * mmio page fault.
+	 */
+	if (IS_ENABLED(CONFIG_X86_64) && maxphyaddr == 52)
+		mask &= ~1ull;
+
+	kvm_mmu_set_mmio_spte_mask(mask, mask);
+}
+
 int kvm_mmu_module_init(void)
 {
 	int ret = -ENOMEM;
@@ -6014,6 +6043,8 @@ int kvm_mmu_module_init(void)
 
 	kvm_mmu_reset_all_pte_masks();
 
+	kvm_set_mmio_spte_mask();
+
 	pte_list_desc_cache = kmem_cache_create("pte_list_desc",
 					    sizeof(struct pte_list_desc),
 					    0, SLAB_ACCOUNT, NULL);
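To make the mask values concrete, here is a small standalone userspace sketch of the same computation (not kernel code; the IS_ENABLED(CONFIG_X86_64) check is dropped for brevity, and the maxphyaddr inputs are illustrative):

#include <stdio.h>
#include <stdint.h>

/* Same computation as kvm_set_mmio_spte_mask() above, lifted out of
 * the kernel so the resulting values can be printed. */
static uint64_t mmio_spte_mask(int maxphyaddr)
{
	uint64_t mask = 1ull << 51;	/* reserved when maxphyaddr < 52 */

	mask |= 1ull;			/* present bit */
	if (maxphyaddr == 52)		/* bit 51 no longer reserved */
		mask &= ~1ull;		/* drop present: no MMIO page fault */
	return mask;
}

int main(void)
{
	/* 46-bit parts: prints 0x8000000000000001 (bit 51 + present). */
	printf("maxphyaddr=46: %#llx\n", (unsigned long long)mmio_spte_mask(46));
	/* 52-bit parts: prints 0x8000000000000000 (present cleared). */
	printf("maxphyaddr=52: %#llx\n", (unsigned long long)mmio_spte_mask(52));
	return 0;
}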
arch/x86/kvm/x86.c
@@ -6910,35 +6910,6 @@ static struct perf_guest_info_callbacks kvm_guest_cbs = {
 	.handle_intel_pt_intr	= kvm_handle_intel_pt_intr,
 };
 
-static void kvm_set_mmio_spte_mask(void)
-{
-	u64 mask;
-	int maxphyaddr = boot_cpu_data.x86_phys_bits;
-
-	/*
-	 * Set the reserved bits and the present bit of a paging-structure
-	 * entry to generate page fault with PFER.RSV = 1.
-	 */
-
-	/*
-	 * Mask the uppermost physical address bit, which would be reserved as
-	 * long as the supported physical address width is less than 52.
-	 */
-	mask = 1ull << 51;
-
-	/* Set the present bit. */
-	mask |= 1ull;
-
-	/*
-	 * If reserved bit is not supported, clear the present bit to disable
-	 * mmio page fault.
-	 */
-	if (IS_ENABLED(CONFIG_X86_64) && maxphyaddr == 52)
-		mask &= ~1ull;
-
-	kvm_mmu_set_mmio_spte_mask(mask, mask);
-}
-
 #ifdef CONFIG_X86_64
 static void pvclock_gtod_update_fn(struct work_struct *work)
 {
@@ -7035,8 +7006,6 @@ int kvm_arch_init(void *opaque)
 	if (r)
 		goto out_free_percpu;
 
-	kvm_set_mmio_spte_mask();
-
 	kvm_x86_ops = ops;
 	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,