| Message ID | 20210611235701.3941724-3-dmatlack@google.com (mailing list archive) |
| --- | --- |
| State | New, archived |
| Series | KVM: x86/mmu: Fast page fault support for the TDP MMU |
On Fri, Jun 11, 2021 at 4:57 PM David Matlack <dmatlack@google.com> wrote:
>
> fast_page_fault is only called from direct_page_fault where we know the
> address is a gpa.
>
> Fixes: 736c291c9f36 ("KVM: x86: Use gpa_t for cr2/gpa to fix TDP support on 32-bit KVM")
> Signed-off-by: David Matlack <dmatlack@google.com>

Reviewed-by: Ben Gardon <bgardon@google.com>

> ---
>  arch/x86/kvm/mmu/mmu.c | 8 +++-----
>  1 file changed, 3 insertions(+), 5 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index eccd889d20a5..1d0fe1445e04 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -3007,8 +3007,7 @@ static bool is_access_allowed(u32 fault_err_code, u64 spte)
>  /*
>   * Returns one of RET_PF_INVALID, RET_PF_FIXED or RET_PF_SPURIOUS.
>   */
> -static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
> -                           u32 error_code)
> +static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code)
>  {
>          struct kvm_shadow_walk_iterator iterator;
>          struct kvm_mmu_page *sp;
> @@ -3024,7 +3023,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
>          do {
>                  u64 new_spte;
>
> -                for_each_shadow_entry_lockless(vcpu, cr2_or_gpa, iterator, spte)
> +                for_each_shadow_entry_lockless(vcpu, gpa, iterator, spte)
>                          if (!is_shadow_present_pte(spte))
>                                  break;
>
> @@ -3103,8 +3102,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
>
>          } while (true);
>
> -        trace_fast_page_fault(vcpu, cr2_or_gpa, error_code, iterator.sptep,
> -                              spte, ret);
> +        trace_fast_page_fault(vcpu, gpa, error_code, iterator.sptep, spte, ret);
>          walk_shadow_page_lockless_end(vcpu);
>
>          return ret;
> --
> 2.32.0.272.g935e593368-goog
>
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index eccd889d20a5..1d0fe1445e04 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3007,8 +3007,7 @@ static bool is_access_allowed(u32 fault_err_code, u64 spte)
 /*
  * Returns one of RET_PF_INVALID, RET_PF_FIXED or RET_PF_SPURIOUS.
  */
-static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
-                           u32 error_code)
+static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code)
 {
         struct kvm_shadow_walk_iterator iterator;
         struct kvm_mmu_page *sp;
@@ -3024,7 +3023,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
         do {
                 u64 new_spte;

-                for_each_shadow_entry_lockless(vcpu, cr2_or_gpa, iterator, spte)
+                for_each_shadow_entry_lockless(vcpu, gpa, iterator, spte)
                         if (!is_shadow_present_pte(spte))
                                 break;

@@ -3103,8 +3102,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,

         } while (true);

-        trace_fast_page_fault(vcpu, cr2_or_gpa, error_code, iterator.sptep,
-                              spte, ret);
+        trace_fast_page_fault(vcpu, gpa, error_code, iterator.sptep, spte, ret);
         walk_shadow_page_lockless_end(vcpu);

         return ret;
fast_page_fault is only called from direct_page_fault where we know the
address is a gpa.

Fixes: 736c291c9f36 ("KVM: x86: Use gpa_t for cr2/gpa to fix TDP support on 32-bit KVM")
Signed-off-by: David Matlack <dmatlack@google.com>
---
 arch/x86/kvm/mmu/mmu.c | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)
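
For context, the call path the commit message relies on looks roughly like the sketch below. This is a simplified, hypothetical reconstruction (the parameter list is approximated and the body is elided), not the exact mmu.c source at this commit: direct_page_fault() is only reached for direct (TDP) faults, so the address it passes down is already a guest physical address and the cr2_or_gpa name in fast_page_fault() was misleading.

```c
/*
 * Simplified sketch of the caller described in the commit message; the
 * real direct_page_fault() in arch/x86/kvm/mmu/mmu.c takes more
 * parameters and does much more work, all elided here.
 */
static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
                             bool prefault, int max_level, bool is_tdp)
{
        int r;

        /*
         * The fault address handed to this function is already a guest
         * physical address, so fast_page_fault() only ever sees a gpa,
         * hence dropping the cr2_or_gpa name.
         */
        r = fast_page_fault(vcpu, gpa, error_code);
        if (r != RET_PF_INVALID)
                return r;

        /* ... slow path: resolve the pfn and install the mapping ... */
        return RET_PF_RETRY;
}
```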