Commit bb97a016 authored by David Hildenbrand, committed by Radim Krčmář

KVM: VMX: cleanup EPTP definitions

Don't use shifts, tag them correctly as EPTP and use better matching
names (PWL vs. GAW).
Signed-off-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
Parent 3f0d4db7
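
For reference, the cleanup only renames and pre-shifts constants; the EPTP bit patterns are unchanged. The following standalone userspace sketch (not part of the commit; the old macro names are reproduced here purely for comparison) checks that the new VMX_EPTP_* values match what the old shift expressions produced:

	/*
	 * Standalone sketch, not kernel code. Macro values are copied
	 * from the hunks below; old names appear only for comparison.
	 */
	#include <assert.h>
	#include <stdio.h>

	/* Old encoding: "GAW" value shifted into bits 5:3 of the EPTP. */
	#define VMX_EPT_DEFAULT_GAW	3
	#define VMX_EPT_GAW_EPTP_SHIFT	3
	#define VMX_EPT_DEFAULT_MT	0x6ull

	/* New encoding: pre-shifted EPTP field values, tagged VMX_EPTP_*. */
	#define VMX_EPTP_PWL_MASK	0x38ull	/* bits 5:3: page-walk length */
	#define VMX_EPTP_PWL_4		0x18ull	/* 4-level walk: (4 - 1) << 3 */
	#define VMX_EPTP_MT_MASK	0x7ull	/* bits 2:0: memory type */
	#define VMX_EPTP_MT_WB		0x6ull	/* write-back */

	int main(void)
	{
		/* The cleanup is cosmetic: the encoded bits are identical. */
		assert((VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT) == VMX_EPTP_PWL_4);
		assert(VMX_EPT_DEFAULT_MT == VMX_EPTP_MT_WB);
		printf("PWL_4 = %#llx, MT_WB = %#llx\n",
		       VMX_EPTP_PWL_4, VMX_EPTP_MT_WB);
		return 0;
	}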
@@ -468,12 +468,13 @@ enum vmcs_field {
 #define VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT	(1ull << 10) /* (42 - 32) */
 #define VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT	(1ull << 11) /* (43 - 32) */
 
-#define VMX_EPT_DEFAULT_GAW			3
-#define VMX_EPT_MAX_GAW				0x4
 #define VMX_EPT_MT_EPTE_SHIFT			3
-#define VMX_EPT_GAW_EPTP_SHIFT			3
-#define VMX_EPT_AD_ENABLE_BIT			(1ull << 6)
-#define VMX_EPT_DEFAULT_MT			0x6ull
+#define VMX_EPTP_PWL_MASK			0x38ull
+#define VMX_EPTP_PWL_4				0x18ull
+#define VMX_EPTP_AD_ENABLE_BIT			(1ull << 6)
+#define VMX_EPTP_MT_MASK			0x7ull
+#define VMX_EPTP_MT_WB				0x6ull
+#define VMX_EPTP_MT_UC				0x0ull
 #define VMX_EPT_READABLE_MASK			0x1ull
 #define VMX_EPT_WRITABLE_MASK			0x2ull
 #define VMX_EPT_EXECUTABLE_MASK			0x4ull
@@ -4298,14 +4298,12 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 
 static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa)
 {
-	u64 eptp;
+	u64 eptp = VMX_EPTP_MT_WB | VMX_EPTP_PWL_4;
 
 	/* TODO write the value reading from MSR */
-	eptp = VMX_EPT_DEFAULT_MT |
-		VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT;
 	if (enable_ept_ad_bits &&
 	    (!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu)))
-		eptp |= VMX_EPT_AD_ENABLE_BIT;
+		eptp |= VMX_EPTP_AD_ENABLE_BIT;
 	eptp |= (root_hpa & PAGE_MASK);
 
 	return eptp;
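
For illustration, with a hypothetical root_hpa of 0x12345000 and EPT A/D bits enabled, the new initializer composes VMX_EPTP_MT_WB | VMX_EPTP_PWL_4 = 0x6 | 0x18 = 0x1e; OR-ing in VMX_EPTP_AD_ENABLE_BIT (bit 6, 0x40) and the page-aligned root yields an EPTP of 0x1234505e, exactly the value the old VMX_EPT_DEFAULT_MT | (VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT) expression produced.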
@@ -7882,16 +7880,15 @@ static int handle_preemption_timer(struct kvm_vcpu *vcpu)
 static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	u64 mask = address & 0x7;
 	int maxphyaddr = cpuid_maxphyaddr(vcpu);
 
 	/* Check for memory type validity */
-	switch (mask) {
-	case 0:
+	switch (address & VMX_EPTP_MT_MASK) {
+	case VMX_EPTP_MT_UC:
 		if (!(vmx->nested.nested_vmx_ept_caps & VMX_EPTP_UC_BIT))
 			return false;
 		break;
-	case 6:
+	case VMX_EPTP_MT_WB:
 		if (!(vmx->nested.nested_vmx_ept_caps & VMX_EPTP_WB_BIT))
 			return false;
 		break;
@@ -7899,8 +7896,8 @@ static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
 		return false;
 	}
 
-	/* Bits 5:3 must be 3 */
-	if (((address >> VMX_EPT_GAW_EPTP_SHIFT) & 0x7) != VMX_EPT_DEFAULT_GAW)
+	/* only 4 levels page-walk length are valid */
+	if ((address & VMX_EPTP_PWL_MASK) != VMX_EPTP_PWL_4)
 		return false;
 
 	/* Reserved bits should not be set */
@@ -7908,7 +7905,7 @@ static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
 		return false;
 
 	/* AD, if set, should be supported */
-	if ((address & VMX_EPT_AD_ENABLE_BIT)) {
+	if (address & VMX_EPTP_AD_ENABLE_BIT) {
 		if (!(vmx->nested.nested_vmx_ept_caps & VMX_EPT_AD_BIT))
 			return false;
 	}
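
Continuing the worked example: for the hypothetical EPTP 0x1234505e above, address & VMX_EPTP_MT_MASK yields 0x6, so the switch takes the VMX_EPTP_MT_WB case; address & VMX_EPTP_PWL_MASK yields 0x18 == VMX_EPTP_PWL_4, so the page-walk-length check passes; and bit 6 is set, so this hunk additionally requires VMX_EPT_AD_BIT in the nested EPT capabilities.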
@@ -7936,7 +7933,7 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
 				     &address, index * 8, 8))
 		return 1;
 
-	accessed_dirty = !!(address & VMX_EPT_AD_ENABLE_BIT);
+	accessed_dirty = !!(address & VMX_EPTP_AD_ENABLE_BIT);
 
 	/*
 	 * If the (L2) guest does a vmfunc to the currently
@@ -9501,7 +9498,7 @@ static void __init vmx_check_processor_compat(void *rtn)
 
 static int get_ept_level(void)
 {
-	return VMX_EPT_DEFAULT_GAW + 1;
+	return 4;
 }
 
 static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
@@ -9702,7 +9699,7 @@ static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
 
 static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu)
 {
-	return nested_ept_get_cr3(vcpu) & VMX_EPT_AD_ENABLE_BIT;
+	return nested_ept_get_cr3(vcpu) & VMX_EPTP_AD_ENABLE_BIT;
 }
 
 /* Callbacks for nested_ept_init_mmu_context: */