Commit 64d4d521 authored by Sheng Yang, committed by Avi Kivity

KVM: Enable MTRR for EPT

The effective memory type under EPT is a combination of MSR_IA32_CR_PAT and the
memory type field of the EPT entry. This patch fills that field from the guest's
MTRR-derived memory type, using a new get_mt_mask_shift() callback so each
backend can report where the memory type bits live in its page table entries.
Signed-off-by: Sheng Yang <sheng@linux.intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Parent 74be52e3
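Before the diff, a minimal standalone sketch of what the new memory-type field means for an EPT entry. The shift value of 3 and the MTRR type encodings below follow the Intel SDM; the helper name and the rest of the scaffolding are illustrative, not part of the patch:

#include <stdint.h>
#include <stdio.h>

#define VMX_EPT_MT_EPTE_SHIFT 3  /* EPT PTE bits 5:3 hold the memory type */
#define MTRR_TYPE_UNCACHABLE  0  /* UC: device/MMIO memory */
#define MTRR_TYPE_WRBACK      6  /* WB: normal cacheable RAM */

/* Fold a guest-derived memory type into an EPT PTE value. */
static uint64_t apply_mt(uint64_t spte, uint64_t mem_type)
{
        return spte | (mem_type << VMX_EPT_MT_EPTE_SHIFT);
}

int main(void)
{
        uint64_t spte = 0x7;  /* read + write + execute permission bits */

        /* WB merges 6 << 3 = 0x30 into bits 5:3; UC leaves them zero. */
        printf("WB spte: %#llx\n",
               (unsigned long long)apply_mt(spte, MTRR_TYPE_WRBACK));
        printf("UC spte: %#llx\n",
               (unsigned long long)apply_mt(spte, MTRR_TYPE_UNCACHABLE));
        return 0;
}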
@@ -483,6 +483,7 @@ struct kvm_x86_ops {
 	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
 	int (*get_tdp_level)(void);
+	int (*get_mt_mask_shift)(void);
 };

 extern struct kvm_x86_ops *kvm_x86_ops;
@@ -496,7 +497,7 @@ int kvm_mmu_setup(struct kvm_vcpu *vcpu);
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
 void kvm_mmu_set_base_ptes(u64 base_pte);
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
-		u64 dirty_mask, u64 nx_mask, u64 x_mask);
+		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 mt_mask);

 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
...
@@ -168,6 +168,7 @@ static u64 __read_mostly shadow_x_mask;	/* mutual exclusive with nx_mask */
 static u64 __read_mostly shadow_user_mask;
 static u64 __read_mostly shadow_accessed_mask;
 static u64 __read_mostly shadow_dirty_mask;
+static u64 __read_mostly shadow_mt_mask;

 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
 {
@@ -183,13 +184,14 @@ void kvm_mmu_set_base_ptes(u64 base_pte)
 EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);

 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
-		u64 dirty_mask, u64 nx_mask, u64 x_mask)
+		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 mt_mask)
 {
 	shadow_user_mask = user_mask;
 	shadow_accessed_mask = accessed_mask;
 	shadow_dirty_mask = dirty_mask;
 	shadow_nx_mask = nx_mask;
 	shadow_x_mask = x_mask;
+	shadow_mt_mask = mt_mask;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
@@ -1546,6 +1548,8 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 {
 	u64 spte;
 	int ret = 0;
+	u64 mt_mask = shadow_mt_mask;
+
 	/*
 	 * We don't set the accessed bit, since we sometimes want to see
 	 * whether the guest actually used the pte (in order to detect
@@ -1564,6 +1568,11 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 		spte |= shadow_user_mask;
 	if (largepage)
 		spte |= PT_PAGE_SIZE_MASK;
+	if (mt_mask) {
+		mt_mask = get_memory_type(vcpu, gfn) <<
+			  kvm_x86_ops->get_mt_mask_shift();
+		spte |= mt_mask;
+	}

 	spte |= (u64)pfn << PAGE_SHIFT;
...
@@ -1912,6 +1912,11 @@ static int get_npt_level(void)
 #endif
 }

+static int svm_get_mt_mask_shift(void)
+{
+	return 0;
+}
+
 static struct kvm_x86_ops svm_x86_ops = {
 	.cpu_has_kvm_support = has_svm,
 	.disabled_by_bios = is_disabled,
@@ -1967,6 +1972,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.set_tss_addr = svm_set_tss_addr,
 	.get_tdp_level = get_npt_level,
+	.get_mt_mask_shift = svm_get_mt_mask_shift,
 };

 static int __init svm_init(void)
...
@@ -3574,6 +3574,11 @@ static int get_ept_level(void)
 	return VMX_EPT_DEFAULT_GAW + 1;
 }

+static int vmx_get_mt_mask_shift(void)
+{
+	return VMX_EPT_MT_EPTE_SHIFT;
+}
+
 static struct kvm_x86_ops vmx_x86_ops = {
 	.cpu_has_kvm_support = cpu_has_kvm_support,
 	.disabled_by_bios = vmx_disabled_by_bios,
@@ -3629,6 +3634,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.set_tss_addr = vmx_set_tss_addr,
 	.get_tdp_level = get_ept_level,
+	.get_mt_mask_shift = vmx_get_mt_mask_shift,
 };

 static int __init vmx_init(void)
@@ -3685,10 +3691,10 @@ static int __init vmx_init(void)
 		bypass_guest_pf = 0;
 		kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
 			VMX_EPT_WRITABLE_MASK |
-			VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT |
 			VMX_EPT_IGMT_BIT);
 		kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
-				VMX_EPT_EXECUTABLE_MASK);
+				VMX_EPT_EXECUTABLE_MASK,
+				VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
 		kvm_enable_tdp();
 	} else
 		kvm_disable_tdp();
...
@@ -2615,7 +2615,7 @@ int kvm_arch_init(void *opaque)
 	kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
 	kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
 	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
-			PT_DIRTY_MASK, PT64_NX_MASK, 0);
+			PT_DIRTY_MASK, PT64_NX_MASK, 0, 0);

 	return 0;
 out:
...
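Taken together, the patch threads memory-type selection through a per-backend hook: kvm_arch_init() passes a zero mt_mask, so set_spte() skips the memory-type path for shadow paging and SVM, while VMX with EPT enabled passes a write-back default mask and shifts the guest's MTRR-derived type into EPT PTE bits 5:3. A minimal sketch of that dispatch with simplified stand-in types (only the get_mt_mask_shift name comes from the patch; the rest is illustrative):

#include <stdint.h>

struct x86_ops {                             /* stand-in for kvm_x86_ops */
        int (*get_mt_mask_shift)(void);
};

static int svm_mt_shift(void) { return 0; } /* NPT: stub, MT path unused */
static int vmx_mt_shift(void) { return 3; } /* EPT: MT field at bits 5:3 */

/* Mirrors the set_spte() change: apply MT bits only when a mask was set. */
static uint64_t build_spte(const struct x86_ops *ops, uint64_t spte,
                           uint64_t mt_mask, uint64_t guest_type)
{
        if (mt_mask)
                spte |= guest_type << ops->get_mt_mask_shift();
        return spte;
}

static const struct x86_ops vmx_ops = { .get_mt_mask_shift = vmx_mt_shift };
/* e.g. build_spte(&vmx_ops, 0x7, 0x30, 6) yields 0x37: WB in bits 5:3. */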