提交 e88b8093 编写于 作者: S Sean Christopherson 提交者: Paolo Bonzini

KVM: x86/mmu: Track write/user faults using bools

Use bools to track write and user faults throughout the page fault paths
and down into mmu_set_spte().  The actual usage is purely boolean, but
that's not obvious without digging into all paths as the current code
uses a mix of bools (TDP and try_async_pf) and ints (shadow paging and
mmu_set_spte()).

No true functional change intended (although the pgprintk() will now
print 0/1 instead of 0/PFERR_WRITE_MASK).
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200923183735.584-9-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
上级 dcc70651
...@@ -3082,7 +3082,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, ...@@ -3082,7 +3082,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
} }
static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
unsigned int pte_access, int write_fault, int level, unsigned int pte_access, bool write_fault, int level,
gfn_t gfn, kvm_pfn_t pfn, bool speculative, gfn_t gfn, kvm_pfn_t pfn, bool speculative,
bool host_writable) bool host_writable)
{ {
...@@ -3188,7 +3188,7 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu, ...@@ -3188,7 +3188,7 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
return -1; return -1;
for (i = 0; i < ret; i++, gfn++, start++) { for (i = 0; i < ret; i++, gfn++, start++) {
mmu_set_spte(vcpu, start, access, 0, sp->role.level, gfn, mmu_set_spte(vcpu, start, access, false, sp->role.level, gfn,
page_to_pfn(pages[i]), true, true); page_to_pfn(pages[i]), true, true);
put_page(pages[i]); put_page(pages[i]);
} }
......
...@@ -550,7 +550,7 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, ...@@ -550,7 +550,7 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
* we call mmu_set_spte() with host_writable = true because * we call mmu_set_spte() with host_writable = true because
* pte_prefetch_gfn_to_pfn always gets a writable pfn. * pte_prefetch_gfn_to_pfn always gets a writable pfn.
*/ */
mmu_set_spte(vcpu, spte, pte_access, 0, PG_LEVEL_4K, gfn, pfn, mmu_set_spte(vcpu, spte, pte_access, false, PG_LEVEL_4K, gfn, pfn,
true, true); true, true);
kvm_release_pfn_clean(pfn); kvm_release_pfn_clean(pfn);
...@@ -630,7 +630,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr, ...@@ -630,7 +630,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
bool prefault) bool prefault)
{ {
bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled(); bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
int write_fault = error_code & PFERR_WRITE_MASK; bool write_fault = error_code & PFERR_WRITE_MASK;
bool exec = error_code & PFERR_FETCH_MASK; bool exec = error_code & PFERR_FETCH_MASK;
bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled; bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
struct kvm_mmu_page *sp = NULL; struct kvm_mmu_page *sp = NULL;
...@@ -746,7 +746,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr, ...@@ -746,7 +746,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
*/ */
static bool static bool
FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu, FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
struct guest_walker *walker, int user_fault, struct guest_walker *walker, bool user_fault,
bool *write_fault_to_shadow_pgtable) bool *write_fault_to_shadow_pgtable)
{ {
int level; int level;
...@@ -784,8 +784,8 @@ FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu, ...@@ -784,8 +784,8 @@ FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code, static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
bool prefault) bool prefault)
{ {
int write_fault = error_code & PFERR_WRITE_MASK; bool write_fault = error_code & PFERR_WRITE_MASK;
int user_fault = error_code & PFERR_USER_MASK; bool user_fault = error_code & PFERR_USER_MASK;
struct guest_walker walker; struct guest_walker walker;
int r; int r;
kvm_pfn_t pfn; kvm_pfn_t pfn;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册