Commit 66592e53 authored by Wei Huang, committed by Xie Haocheng

KVM: x86/mmu: Support shadowing NPT when 5-level paging is enabled in host

mainline inclusion
from mainline-5.15
commit cb0f722a
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I5NGRU
CVE: NA

-------------------------------------------------

When the 5-level page table CPU flag is set in the host, but the guest
has CR4.LA57=0 (including the case of a 32-bit guest), the top level of
the shadow NPT page tables will be fixed, consisting of one pointer to
a lower-level table and 511 non-present entries.  Extend the existing
code that creates the fixed PML4 or PDP table, to provide a fixed PML5
table if needed.
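
For illustration, a minimal sketch of such a fixed top-level table (hypothetical helper name; the backport's actual allocation code appears in the diff below). A zeroed 4 KiB page provides 512 non-present 8-byte entries, and only entry 0 is filled in:

/*
 * Sketch only: build a fixed top-level table whose sole present
 * entry points at the next-lower table. get_zeroed_page() leaves
 * entries 1..511 as zero, i.e. non-present.
 */
static u64 *alloc_fixed_top_level(u64 lower_table_pa, u64 pm_mask)
{
	u64 *table = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);

	if (!table)
		return NULL;

	table[0] = lower_table_pa | pm_mask;	/* entry 0: present */
	return table;				/* entries 1..511: non-present */
}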

This is not needed on EPT because the number of layers in the tables
is specified in the EPTP instead of depending on the host CR4.
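
For comparison, a sketch of the EPT side: per the Intel SDM, EPTP bits 5:3 carry the page-walk length minus 1, so the walk depth travels with the pointer itself rather than with host CR4.LA57 (the constant and function names below are illustrative, not KVM's):

/*
 * Illustrative only: EPTP layout per the Intel SDM. Bits 2:0 hold
 * the memory type, bits 5:3 hold (page-walk levels - 1), and the
 * upper bits hold the root table's physical address.
 */
#define EPTP_MEMTYPE_WB		6ULL				/* bits 2:0 */
#define EPTP_WALK_LEN(levels)	(((u64)(levels) - 1) << 3)	/* bits 5:3 */

static u64 make_eptp(u64 root_pa, int levels)
{
	return root_pa | EPTP_WALK_LEN(levels) | EPTP_MEMTYPE_WB;
}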
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Wei Huang <wei.huang2@amd.com>
Message-Id: <20210818165549.3771014-3-wei.huang2@amd.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Xie Haocheng <haocheng.xie@amd.com>
Parent 63ae82c3
@@ -387,6 +387,7 @@ struct kvm_mmu {
 	u64 *pae_root;
 	u64 *pml4_root;
+	u64 *pml5_root;
 	/*
 	 * check zero bits on shadow page table entries, these
@@ -3305,7 +3305,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 	 * the shadow page table may be a PAE or a long mode page table.
 	 */
 	pm_mask = PT_PRESENT_MASK;
-	if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
+	if (vcpu->arch.mmu->shadow_root_level >= PT64_ROOT_4LEVEL) {
 		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;

	/*
@@ -3365,6 +3365,33 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 		vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pml4_root);
 	}
+#ifdef CONFIG_X86_64
+	if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_5LEVEL) {
+		if (vcpu->arch.mmu->pml4_root == NULL) {
+			u64 *pml4_root;
+
+			pml4_root = (void*)get_zeroed_page(GFP_KERNEL_ACCOUNT);
+			if (!pml4_root)
+				return -ENOMEM;
+
+			pml4_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask;
+			vcpu->arch.mmu->pml4_root = pml4_root;
+		}
+
+		if (vcpu->arch.mmu->pml5_root == NULL) {
+			u64 *pml5_root;
+
+			pml5_root = (void*)get_zeroed_page(GFP_KERNEL_ACCOUNT);
+			if (!pml5_root)
+				return -ENOMEM;
+
+			pml5_root[0] = __pa(vcpu->arch.mmu->pml4_root) | pm_mask;
+			vcpu->arch.mmu->pml5_root = pml5_root;
+		}
+
+		vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pml5_root);
+	}
+#endif
 set_root_pgd:
 	vcpu->arch.mmu->root_pgd = root_pgd;
@@ -5303,6 +5330,9 @@ static void free_mmu_pages(struct kvm_mmu *mmu)
 {
 	free_page((unsigned long)mmu->pae_root);
 	free_page((unsigned long)mmu->pml4_root);
+#ifdef CONFIG_X86_64
+	free_page((unsigned long)mmu->pml5_root);
+#endif
 }

 static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)