diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 0b2954b5fbc6c14dcb41a56d6e287d29ed2f827a..6f5910b7b9bc440ab77a16111dd69a8481a11a7e 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3103,8 +3103,7 @@ static bool is_access_allowed(u32 fault_err_code, u64 spte)
 /*
  * Returns one of RET_PF_INVALID, RET_PF_FIXED or RET_PF_SPURIOUS.
  */
-static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
-			   u32 error_code)
+static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code)
 {
 	struct kvm_shadow_walk_iterator iterator;
 	struct kvm_mmu_page *sp;
@@ -3120,7 +3119,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 	do {
 		u64 new_spte;
 
-		for_each_shadow_entry_lockless(vcpu, cr2_or_gpa, iterator, spte)
+		for_each_shadow_entry_lockless(vcpu, gpa, iterator, spte)
 			if (!is_shadow_present_pte(spte))
 				break;
 
@@ -3199,8 +3198,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 
 	} while (true);
 
-	trace_fast_page_fault(vcpu, cr2_or_gpa, error_code, iterator.sptep,
-			      spte, ret);
+	trace_fast_page_fault(vcpu, gpa, error_code, iterator.sptep, spte, ret);
 	walk_shadow_page_lockless_end(vcpu);
 
 	return ret;
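
Note: this is a pure rename with no functional change. The name cr2_or_gpa suggests the
parameter could also be a CR2 (guest virtual) address, but fast_page_fault() only handles
direct faults, where the address handed down is always a guest physical address; gpa states
that invariant. A minimal sketch of the call-site shape this relies on, assuming the
direct_page_fault() caller in mmu.c around the time of this change (surrounding logic
elided; not part of this diff):

	/*
	 * Sketch of the sole caller: direct_page_fault() always passes a
	 * guest physical address, which is why the rename is safe.
	 */
	r = fast_page_fault(vcpu, gpa, error_code);
	if (r != RET_PF_INVALID)
		return r;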