Unverified commit e2614892, authored by openeuler-ci-bot and committed by Gitee

!362 AMD: Support svm guest when host CR4.LA57 is set.

Merge Pull Request from: @haochengxie 
 
Add SVM guest support when host CR4.LA57 is set (i.e. the host uses 5-level paging). The issue is fixed by backporting the upstream patch referenced below.
Reference link: https://lore.kernel.org/lkml/20210818165549.3771014-1-wei.huang2@amd.com/
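
To make the shape of the backport easier to follow, here is a minimal standalone sketch (plain C, not kernel code) of the logic the patch introduces: kvm_configure_mmu() gains a forced TDP root level, kvm_mmu_get_tdp_level() returns that level whenever it is nonzero, and the SVM call site passes the host's max NPT level as the forced value while VMX passes 0. Identifier names mirror the hunks below; the host_la57 flag, the guest_maxphyaddr parameter, and the omission of the huge-page-level argument are simplifications for illustration only.

/* Toy model of the backported logic; names mirror the diff hunks below. */
#include <stdbool.h>
#include <stdio.h>

static bool tdp_enabled;
static int tdp_root_level;   /* vendor-forced root level, 0 = not forced */
static int max_tdp_level;    /* upper bound chosen by the vendor module */

/* New signature: a forced root level is passed before the max level. */
static void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
                              int tdp_max_root_level)
{
        tdp_enabled = enable_tdp;
        tdp_root_level = tdp_forced_root_level;
        max_tdp_level = tdp_max_root_level;
}

static int kvm_mmu_get_tdp_level(int guest_maxphyaddr)
{
        /* tdp_root_level is the architecture-forced level, use it if nonzero */
        if (tdp_root_level)
                return tdp_root_level;

        /* Use 5-level TDP if and only if it's useful/necessary. */
        if (max_tdp_level == 5 && guest_maxphyaddr <= 48)
                return 4;

        return max_tdp_level;
}

/* Stand-in for svm.c's helper: 5-level NPT when the host uses LA57. */
static int get_max_npt_level(bool host_la57)
{
        return host_la57 ? 5 : 4;
}

int main(void)
{
        bool host_la57 = true;  /* host booted with CR4.LA57 set */

        /* SVM: force the VM's NPT level to the host's max NPT level. */
        kvm_configure_mmu(true, get_max_npt_level(host_la57),
                          get_max_npt_level(host_la57));
        printf("NPT level on an LA57 host: %d\n", kvm_mmu_get_tdp_level(48));

        /* VMX: no forced level (0), so the old heuristic still applies. */
        kvm_configure_mmu(true, 0, 5);
        printf("EPT level for a guest with MAXPHYADDR 48: %d\n",
               kvm_mmu_get_tdp_level(48));
        return 0;
}

In this model the first printf prints 5: with the forced level in place, the guest NPT level tracks the host level instead of being capped at 4 by the MAXPHYADDR heuristic, which is the behavior the pull request title describes.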
 
Link:https://gitee.com/openeuler/kernel/pulls/362 

Reviewed-by: Kevin Zhu <zhukeqian1@huawei.com> 
Reviewed-by: Jason Zeng <jason.zeng@intel.com> 
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com> 
Acked-by: Zheng Zengkai <zhengzengkai@huawei.com> 
@@ -662,7 +662,6 @@ struct kvm_vcpu_arch {
 	unsigned long cr3_lm_rsvd_bits;
 	int maxphyaddr;
-	int max_tdp_level;
 
 	/* emulate context */
@@ -1626,8 +1625,8 @@ void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
 void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd, bool skip_tlb_flush,
 		     bool skip_mmu_sync);
 
-void kvm_configure_mmu(bool enable_tdp, int tdp_max_root_level,
-		       int tdp_huge_page_level);
+void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
+		       int tdp_max_root_level, int tdp_huge_page_level);
 
 static inline u16 kvm_read_ldt(void)
 {
...
@@ -96,6 +96,7 @@ module_param_named(flush_on_reuse, force_flush_and_sync_on_reuse, bool, 0644);
 bool tdp_enabled = false;
 
 static int max_huge_page_level __read_mostly;
+static int tdp_root_level __read_mostly;
 static int max_tdp_level __read_mostly;
 
 enum {
@@ -4515,6 +4516,10 @@ static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
 static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
 {
+	/* tdp_root_level is architecture forced level, use it if nonzero */
+	if (tdp_root_level)
+		return tdp_root_level;
+
 	/* Use 5-level TDP if and only if it's useful/necessary. */
 	if (max_tdp_level == 5 && cpuid_maxphyaddr(vcpu) <= 48)
 		return 4;
@@ -5232,10 +5237,11 @@ void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_invpcid_gva);
 
-void kvm_configure_mmu(bool enable_tdp, int tdp_max_root_level,
-		       int tdp_huge_page_level)
+void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
+		       int tdp_max_root_level, int tdp_huge_page_level)
 {
 	tdp_enabled = enable_tdp;
+	tdp_root_level = tdp_forced_root_level;
 	max_tdp_level = tdp_max_root_level;
 
 	/*
...
@@ -989,7 +989,9 @@ static __init int svm_hardware_setup(void)
 	if (npt_enabled && !npt)
 		npt_enabled = false;
 
-	kvm_configure_mmu(npt_enabled, get_max_npt_level(), PG_LEVEL_1G);
+	/* Force VM NPT level equal to the host's max NPT level */
+	kvm_configure_mmu(npt_enabled, get_max_npt_level(),
+			  get_max_npt_level(), PG_LEVEL_1G);
 	pr_info("kvm: Nested Paging %sabled\n", npt_enabled ? "en" : "dis");
 
 	if (nrips) {
...
@@ -8361,7 +8361,8 @@ static __init int hardware_setup(void)
 		ept_lpage_level = PG_LEVEL_2M;
 	else
 		ept_lpage_level = PG_LEVEL_4K;
-	kvm_configure_mmu(enable_ept, vmx_get_max_tdp_level(), ept_lpage_level);
+	kvm_configure_mmu(enable_ept, 0, vmx_get_max_tdp_level(),
+			  ept_lpage_level);
 
 	/*
 	 * Only enable PML when hardware supports PML feature, and both EPT
...