diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 6ecc24e335ffa7392c8d52a8a9a8fbcaced10858..39ccceb5f21e383155dbd652b708da7618eac87c 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -386,7 +386,7 @@ struct kvm_mmu { u32 pkru_mask; u64 *pae_root; - u64 *lm_root; + u64 *pml4_root; /* * check zero bits on shadow page table entries, these diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 6096d0f1a62af06b7c16d9fb73a1f5acfc3dec52..0dad71f72c64720367b2937e7acacbb19f879f6d 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -3311,7 +3311,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) /* * Allocate the page for the PDPTEs when shadowing 32-bit NPT * with 64-bit only when needed. Unlike 32-bit NPT, it doesn't - * need to be in low mem. See also lm_root below. + * need to be in low mem. See also pml4_root below. */ if (!vcpu->arch.mmu->pae_root) { WARN_ON_ONCE(!tdp_enabled); @@ -3351,19 +3351,19 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) * handled above (to share logic with PAE), deal with the PML4 here. 
*/ if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) { - if (vcpu->arch.mmu->lm_root == NULL) { - u64 *lm_root; + if (vcpu->arch.mmu->pml4_root == NULL) { + u64 *pml4_root; - lm_root = (void*)get_zeroed_page(GFP_KERNEL_ACCOUNT); - if (!lm_root) + pml4_root = (void*)get_zeroed_page(GFP_KERNEL_ACCOUNT); + if (!pml4_root) return -ENOMEM; - lm_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask; + pml4_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask; - vcpu->arch.mmu->lm_root = lm_root; + vcpu->arch.mmu->pml4_root = pml4_root; } - vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root); + vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pml4_root); } set_root_pgd: @@ -5302,7 +5302,7 @@ slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot, static void free_mmu_pages(struct kvm_mmu *mmu) { free_page((unsigned long)mmu->pae_root); - free_page((unsigned long)mmu->lm_root); + free_page((unsigned long)mmu->pml4_root); } static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 2124fe54abfb5e2ff655a8793c9813ebade5096a..aa693476f78db414dd4090edeaf8d597ddc6da45 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -443,6 +443,11 @@ static int has_svm(void) return 0; } + if (pgtable_l5_enabled()) { + pr_info("KVM doesn't yet support 5-level paging on AMD SVM\n"); + return 0; + } + return 1; }