Commit 312b616b authored by Junaid Shahid, committed by Paolo Bonzini

kvm: x86: mmu: Set SPTE_SPECIAL_MASK within mmu.c

Instead of the caller including the SPTE_SPECIAL_MASK in the masks being
supplied to kvm_mmu_set_mmio_spte_mask() and kvm_mmu_set_mask_ptes(),
those functions now themselves include the SPTE_SPECIAL_MASK.

Note that bit 63 is now reset in the default MMIO mask.
Signed-off-by: Junaid Shahid <junaids@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent ab22a473
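To make the new contract concrete, here is a minimal standalone C sketch (illustrative only, not kernel code; the helper is a stand-in for kvm_mmu_set_mmio_spte_mask(), and SPTE_SPECIAL_MASK mirrors the kernel's bit-62 definition): the special bit is now ORed in by the setter, so callers pass only their architecture-specific bits.

#include <stdint.h>
#include <stdio.h>

#define SPTE_SPECIAL_MASK	(1ULL << 62)	/* mirrors the mmu.c definition */

static uint64_t shadow_mmio_mask;

/* Stand-in for kvm_mmu_set_mmio_spte_mask(): the special bit is added here. */
static void set_mmio_spte_mask(uint64_t mmio_mask)
{
	shadow_mmio_mask = mmio_mask | SPTE_SPECIAL_MASK;
}

int main(void)
{
	/* EPT misconfig value: bits 2:0 = 110b, i.e. write/execute without read. */
	const uint64_t misconfig_wx = 0x6;

	set_mmio_spte_mask(misconfig_wx);	/* the caller no longer ORs in bit 62 */
	printf("shadow_mmio_mask = %#llx\n", (unsigned long long)shadow_mmio_mask);
	return 0;
}

Compiled with any C compiler, this prints 0x4000000000000006: the write/execute misconfiguration bits plus the special bit.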
@@ -208,7 +208,7 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu);
 void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask)
 {
-	shadow_mmio_mask = mmio_mask;
+	shadow_mmio_mask = mmio_mask | SPTE_SPECIAL_MASK;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
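The mask installed here is what later identifies MMIO SPTEs: an entry is treated as an MMIO marker when every bit of shadow_mmio_mask is set in it. A simplified, self-contained sketch of that check (the helper name and sample values are illustrative, not taken from this diff):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SPTE_SPECIAL_MASK	(1ULL << 62)

/* What a call like kvm_mmu_set_mmio_spte_mask(0x6) would now install. */
static const uint64_t shadow_mmio_mask = 0x6ULL | SPTE_SPECIAL_MASK;

/* An SPTE marks an MMIO access iff all bits of the MMIO mask are set in it. */
static bool spte_is_mmio(uint64_t spte)
{
	return (spte & shadow_mmio_mask) == shadow_mmio_mask;
}

int main(void)
{
	printf("%d\n", spte_is_mmio(shadow_mmio_mask | (0xbeefULL << 12)));	/* 1 */
	printf("%d\n", spte_is_mmio(0x6));					/* 0: special bit missing */
	return 0;
}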
@@ -318,6 +318,9 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask,
 		u64 acc_track_mask)
 {
+	if (acc_track_mask != 0)
+		acc_track_mask |= SPTE_SPECIAL_MASK;
+
 	shadow_user_mask = user_mask;
 	shadow_accessed_mask = accessed_mask;
 	shadow_dirty_mask = dirty_mask;
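The guard matters because acc_track_mask is non-zero only when lockless access tracking is in use (for example EPT without A/D bits, where vmx_enable_tdp() below passes VMX_EPT_RWX_MASK); a zero mask must keep meaning "access tracking disabled", so the special bit is not forced into it. A minimal sketch of that behaviour (stand-in names and example values, not kernel code):

#include <stdint.h>
#include <stdio.h>

#define SPTE_SPECIAL_MASK	(1ULL << 62)

static uint64_t shadow_acc_track_mask;

/* Stand-in for the acc_track handling in kvm_mmu_set_mask_ptes(). */
static void set_acc_track_mask(uint64_t acc_track_mask)
{
	if (acc_track_mask != 0)
		acc_track_mask |= SPTE_SPECIAL_MASK;
	shadow_acc_track_mask = acc_track_mask;
}

int main(void)
{
	set_acc_track_mask(0);		/* access tracking unused: mask stays 0 */
	printf("%#llx\n", (unsigned long long)shadow_acc_track_mask);

	set_acc_track_mask(0x7);	/* e.g. an RWX permission mask: bit 62 is added */
	printf("%#llx\n", (unsigned long long)shadow_acc_track_mask);
	return 0;
}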
...
@@ -5236,10 +5236,8 @@ static void ept_set_mmio_spte_mask(void)
 	/*
 	 * EPT Misconfigurations can be generated if the value of bits 2:0
 	 * of an EPT paging-structure entry is 110b (write/execute).
-	 * Also, special bit (62) is set to quickly identify mmio spte.
 	 */
-	kvm_mmu_set_mmio_spte_mask(SPTE_SPECIAL_MASK |
-				   VMX_EPT_MISCONFIG_WX_VALUE);
+	kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE);
 }
 
 #define VMX_XSS_EXIT_BITMAP 0
@@ -6585,7 +6583,7 @@ void vmx_enable_tdp(void)
 		enable_ept_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull,
 		0ull, VMX_EPT_EXECUTABLE_MASK,
 		cpu_has_vmx_ept_execute_only() ? 0ull : VMX_EPT_READABLE_MASK,
-		enable_ept_ad_bits ? 0ull : SPTE_SPECIAL_MASK | VMX_EPT_RWX_MASK);
+		enable_ept_ad_bits ? 0ull : VMX_EPT_RWX_MASK);
 
 	ept_set_mmio_spte_mask();
 	kvm_enable_tdp();
...
@@ -5952,9 +5952,6 @@ static void kvm_set_mmio_spte_mask(void)
 	/* Mask the reserved physical address bits. */
 	mask = rsvd_bits(maxphyaddr, 51);
 
-	/* Bit 62 is always reserved for 32bit host. */
-	mask |= 0x3ull << 62;
-
 	/* Set the present bit. */
 	mask |= 1ull;
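This removal is where the commit-message note about bit 63 comes from: the old code ORed 0x3ull << 62 (bits 63:62) into the default MMIO mask, while after the patch only bit 62 is contributed, by kvm_mmu_set_mmio_spte_mask() itself. A quick arithmetic sketch (illustrative only; the reserved-PA and present bits are omitted):

#include <stdint.h>
#include <stdio.h>

#define SPTE_SPECIAL_MASK	(1ULL << 62)

int main(void)
{
	uint64_t old_bits = 0x3ULL << 62;	/* bits 62 and 63, ORed in by the caller */
	uint64_t new_bits = SPTE_SPECIAL_MASK;	/* only bit 62, now added in mmu.c */

	printf("old: %#llx  new: %#llx\n",
	       (unsigned long long)old_bits, (unsigned long long)new_bits);
	/* old: 0xc000000000000000  new: 0x4000000000000000 -> bit 63 is now clear */
	return 0;
}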
...