Message ID | 20200923221406.16297-2-sean.j.christopherson@intel.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | KVM: x86/mmu: Zap orphaned kids for nested TDP MMU | expand |
On Wed, Sep 23, 2020 at 3:14 PM Sean Christopherson <sean.j.christopherson@intel.com> wrote: > > Move the logic that controls whether or not FNAME(invlpg) needs to flush > fully into FNAME(invlpg) so that mmu_page_zap_pte() doesn't return a > value. This allows a future patch to redefine the return semantics for > mmu_page_zap_pte() so that it can recursively zap orphaned child shadow > pages for nested TDP MMUs. > > No functional change intended. > > Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com> Reviewed-by: Ben Gardon <bgardon@google.com> > > --- > arch/x86/kvm/mmu/mmu.c | 10 +++------- > arch/x86/kvm/mmu/paging_tmpl.h | 7 +++++-- > 2 files changed, 8 insertions(+), 9 deletions(-) > > diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c > index 76c5826e29a2..a91e8601594d 100644 > --- a/arch/x86/kvm/mmu/mmu.c > +++ b/arch/x86/kvm/mmu/mmu.c > @@ -2615,7 +2615,7 @@ static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep, > } > } > > -static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp, > +static void mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp, > u64 *spte) > { > u64 pte; > @@ -2631,13 +2631,9 @@ static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp, > child = to_shadow_page(pte & PT64_BASE_ADDR_MASK); > drop_parent_pte(child, spte); > } > - return true; > - } > - > - if (is_mmio_spte(pte)) > + } else if (is_mmio_spte(pte)) { > mmu_spte_clear_no_track(spte); > - > - return false; > + } > } > > static void kvm_mmu_page_unlink_children(struct kvm *kvm, > diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h > index 4dd6b1e5b8cf..3bb624a3dda9 100644 > --- a/arch/x86/kvm/mmu/paging_tmpl.h > +++ b/arch/x86/kvm/mmu/paging_tmpl.h > @@ -895,6 +895,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa) > { > struct kvm_shadow_walk_iterator iterator; > struct kvm_mmu_page *sp; > + u64 old_spte; > int level; > u64 *sptep; > > @@ -917,7 +918,8 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa) > sptep = iterator.sptep; > > sp = sptep_to_sp(sptep); > - if (is_last_spte(*sptep, level)) { > + old_spte = *sptep; > + if (is_last_spte(old_spte, level)) { > pt_element_t gpte; > gpa_t pte_gpa; > > @@ -927,7 +929,8 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa) > pte_gpa = FNAME(get_level1_sp_gpa)(sp); > pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t); > > - if (mmu_page_zap_pte(vcpu->kvm, sp, sptep)) > + mmu_page_zap_pte(vcpu->kvm, sp, sptep); > + if (is_shadow_present_pte(old_spte)) > kvm_flush_remote_tlbs_with_address(vcpu->kvm, > sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level)); > > -- > 2.28.0 >
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 76c5826e29a2..a91e8601594d 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2615,7 +2615,7 @@ static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep, } } -static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp, +static void mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp, u64 *spte) { u64 pte; @@ -2631,13 +2631,9 @@ static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp, child = to_shadow_page(pte & PT64_BASE_ADDR_MASK); drop_parent_pte(child, spte); } - return true; - } - - if (is_mmio_spte(pte)) + } else if (is_mmio_spte(pte)) { mmu_spte_clear_no_track(spte); - - return false; + } } static void kvm_mmu_page_unlink_children(struct kvm *kvm, diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 4dd6b1e5b8cf..3bb624a3dda9 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -895,6 +895,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa) { struct kvm_shadow_walk_iterator iterator; struct kvm_mmu_page *sp; + u64 old_spte; int level; u64 *sptep; @@ -917,7 +918,8 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa) sptep = iterator.sptep; sp = sptep_to_sp(sptep); - if (is_last_spte(*sptep, level)) { + old_spte = *sptep; + if (is_last_spte(old_spte, level)) { pt_element_t gpte; gpa_t pte_gpa; @@ -927,7 +929,8 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa) pte_gpa = FNAME(get_level1_sp_gpa)(sp); pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t); - if (mmu_page_zap_pte(vcpu->kvm, sp, sptep)) + mmu_page_zap_pte(vcpu->kvm, sp, sptep); + if (is_shadow_present_pte(old_spte)) kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level));
Move the logic that controls whether or not FNAME(invlpg) needs to flush fully into FNAME(invlpg) so that mmu_page_zap_pte() doesn't return a value. This allows a future patch to redefine the return semantics for mmu_page_zap_pte() so that it can recursively zap orphaned child shadow pages for nested TDP MMUs. No functional change intended. Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com> --- arch/x86/kvm/mmu/mmu.c | 10 +++------- arch/x86/kvm/mmu/paging_tmpl.h | 7 +++++-- 2 files changed, 8 insertions(+), 9 deletions(-)