Message ID | 20220203010051.2813563-13-dmatlack@google.com (mailing list archive)
---|---
State | New, archived
Series | Extend Eager Page Splitting to the shadow MMU
On Wed, Feb 2, 2022 at 5:02 PM David Matlack <dmatlack@google.com> wrote:
>
> Allow adding new entries to the rmap and linking shadow pages without a
> struct kvm_vcpu pointer by moving the implementation of rmap_add() and
> link_shadow_page() into inner helper functions.
>
> No functional change intended.

Reviewed-by: Ben Gardon <bgardon@google.com>

> Signed-off-by: David Matlack <dmatlack@google.com>
> ---
>  arch/x86/kvm/mmu/mmu.c | 43 ++++++++++++++++++++++++++++---------------
>  1 file changed, 28 insertions(+), 15 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index de7c47ee0def..c2f7f026d414 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -736,9 +736,9 @@ static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
 }
 
-static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
+static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_mmu_memory_cache *cache)
 {
-	return kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
+	return kvm_mmu_memory_cache_alloc(cache);
 }
 
 static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
@@ -885,7 +885,7 @@ gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
 /*
  * Returns the number of pointers in the rmap chain, not counting the new one.
  */
-static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
+static int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte,
 			struct kvm_rmap_head *rmap_head)
 {
 	struct pte_list_desc *desc;
@@ -896,7 +896,7 @@ static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
 		rmap_head->val = (unsigned long)spte;
 	} else if (!(rmap_head->val & 1)) {
 		rmap_printk("%p %llx 1->many\n", spte, *spte);
-		desc = mmu_alloc_pte_list_desc(vcpu);
+		desc = mmu_alloc_pte_list_desc(cache);
 		desc->sptes[0] = (u64 *)rmap_head->val;
 		desc->sptes[1] = spte;
 		desc->spte_count = 2;
@@ -908,7 +908,7 @@ static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
 		while (desc->spte_count == PTE_LIST_EXT) {
 			count += PTE_LIST_EXT;
 			if (!desc->more) {
-				desc->more = mmu_alloc_pte_list_desc(vcpu);
+				desc->more = mmu_alloc_pte_list_desc(cache);
 				desc = desc->more;
 				desc->spte_count = 0;
 				break;
@@ -1607,8 +1607,10 @@ static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
 
 #define RMAP_RECYCLE_THRESHOLD 1000
 
-static void rmap_add(struct kvm_vcpu *vcpu, const struct kvm_memory_slot *slot,
-		     u64 *spte, gfn_t gfn)
+static void __rmap_add(struct kvm *kvm,
+		       struct kvm_mmu_memory_cache *cache,
+		       const struct kvm_memory_slot *slot,
+		       u64 *spte, gfn_t gfn)
 {
 	struct kvm_mmu_page *sp;
 	struct kvm_rmap_head *rmap_head;
@@ -1617,15 +1619,21 @@ static void rmap_add(struct kvm_vcpu *vcpu, const struct kvm_memory_slot *slot,
 	sp = sptep_to_sp(spte);
 	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
 	rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
-	rmap_count = pte_list_add(vcpu, spte, rmap_head);
+	rmap_count = pte_list_add(cache, spte, rmap_head);
 
 	if (rmap_count > RMAP_RECYCLE_THRESHOLD) {
-		kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0));
+		kvm_unmap_rmapp(kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0));
 		kvm_flush_remote_tlbs_with_address(
-				vcpu->kvm, sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level));
+				kvm, sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level));
 	}
 }
 
+static void rmap_add(struct kvm_vcpu *vcpu, const struct kvm_memory_slot *slot,
+		     u64 *spte, gfn_t gfn)
+{
+	__rmap_add(vcpu->kvm, &vcpu->arch.mmu_pte_list_desc_cache, slot, spte, gfn);
+}
+
 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
 	bool young = false;
@@ -1693,13 +1701,13 @@ static unsigned kvm_page_table_hashfn(gfn_t gfn)
 	return hash_64(gfn, KVM_MMU_HASH_SHIFT);
 }
 
-static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
+static void mmu_page_add_parent_pte(struct kvm_mmu_memory_cache *cache,
 				    struct kvm_mmu_page *sp, u64 *parent_pte)
 {
 	if (!parent_pte)
 		return;
 
-	pte_list_add(vcpu, parent_pte, &sp->parent_ptes);
+	pte_list_add(cache, parent_pte, &sp->parent_ptes);
 }
 
 static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
@@ -2297,8 +2305,8 @@ static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
 	__shadow_walk_next(iterator, *iterator->sptep);
 }
 
-static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
-			     struct kvm_mmu_page *sp)
+static void __link_shadow_page(struct kvm_mmu_memory_cache *cache, u64 *sptep,
+			       struct kvm_mmu_page *sp)
 {
 	u64 spte;
 
@@ -2308,12 +2316,17 @@ static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
 
 	mmu_spte_set(sptep, spte);
 
-	mmu_page_add_parent_pte(vcpu, sp, sptep);
+	mmu_page_add_parent_pte(cache, sp, sptep);
 
 	if (sp->unsync_children || sp->unsync)
 		mark_unsync(sptep);
 }
 
+static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep, struct kvm_mmu_page *sp)
+{
+	__link_shadow_page(&vcpu->arch.mmu_pte_list_desc_cache, sptep, sp);
+}
+
 static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 				 unsigned direct_access)
 {
Allow adding new entries to the rmap and linking shadow pages without a
struct kvm_vcpu pointer by moving the implementation of rmap_add() and
link_shadow_page() into inner helper functions.

No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
---
 arch/x86/kvm/mmu/mmu.c | 43 ++++++++++++++++++++++++++++---------------
 1 file changed, 28 insertions(+), 15 deletions(-)
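For readers following the series, here is a minimal sketch (not part of this patch; the function name and call site are assumptions for illustration) of how a VM-scoped caller with no struct kvm_vcpu, such as a later eager-page-splitting path, could use the new inner helpers together with the existing kvm_mmu_topup_memory_cache() API:

/*
 * Illustrative sketch only: a caller that has no vCPU supplies its own
 * pte_list_desc cache.  Everything here except __rmap_add() and
 * kvm_mmu_topup_memory_cache() is hypothetical.
 */
static int example_rmap_add_without_vcpu(struct kvm *kvm,
					 struct kvm_mmu_memory_cache *cache,
					 const struct kvm_memory_slot *slot,
					 u64 *spte, gfn_t gfn)
{
	int r;

	/* Fill the cache up front, while it is still safe to allocate. */
	r = kvm_mmu_topup_memory_cache(cache, 1);
	if (r)
		return r;

	/* The inner helper needs only kvm plus a cache, not a vCPU. */
	__rmap_add(kvm, cache, slot, spte, gfn);
	return 0;
}

The vCPU-facing wrappers added by the patch follow the same shape, except the cache is &vcpu->arch.mmu_pte_list_desc_cache and it is topped up earlier in the page fault path via mmu_topup_memory_caches().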