Message ID | 20151120174805.a91793d1fce2f0a493f1b955@lab.ntt.co.jp (mailing list archive) |
---|---|
State | New, archived |
You can move this patch to the front of
[PATCH 08/10] KVM: x86: MMU: Use for_each_rmap_spte macro instead of pte_list_walk()

By moving kvm_mmu_mark_parents_unsync() to after mmu_spte_set() (the parent
spte is present by then), you can directly clean up for_each_rmap_spte().

On 11/20/2015 04:48 PM, Takuya Yoshikawa wrote:
> Every time kvm_mmu_get_page() is called with a non-NULL parent_pte
> argument, link_shadow_page() follows that to set the parent entry so
> that the new mapping will point to the returned page table.
>
> Moving parent_pte handling there allows us to clean up the code because
> parent_pte is passed to kvm_mmu_get_page() just for mark_unsync() and
> mmu_page_add_parent_pte().
>
> Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
> ---
>  arch/x86/kvm/mmu.c         | 22 ++++++++--------------
>  arch/x86/kvm/paging_tmpl.h |  6 ++----
>  2 files changed, 10 insertions(+), 18 deletions(-)
> [...]
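For reference, this is roughly the cleanup Xiao is pointing at: a minimal sketch, assuming the for_each_rmap_spte()/rmap_iterator interface introduced earlier in this series, and not code taken from the thread itself.

```c
/*
 * Sketch only: once mmu_page_add_parent_pte() runs after mmu_spte_set() in
 * link_shadow_page(), every entry in sp->parent_ptes is a present spte, so
 * kvm_mmu_mark_parents_unsync() can use for_each_rmap_spte() (which expects
 * present sptes) instead of pte_list_walk().
 */
static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
{
	u64 *sptep;
	struct rmap_iterator iter;

	for_each_rmap_spte(&sp->parent_ptes, &iter, sptep)
		mark_unsync(sptep);
}
```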
On 20/11/2015 09:57, Xiao Guangrong wrote:
> You can move this patch to the front of
> [PATCH 08/10] KVM: x86: MMU: Use for_each_rmap_spte macro instead of
> pte_list_walk()
>
> By moving kvm_mmu_mark_parents_unsync() to after mmu_spte_set() (the
> parent spte is present by then), you can directly clean up
> for_each_rmap_spte().

So basically squash together the two patches (8/10 and 9/10) except the
change to kvm_mmu_mark_parents_unsync; then in the second patch switch
from pte_list_walk to for_each_rmap_spte.

That makes sense indeed.

Paolo
On 2015/11/26 1:32, Paolo Bonzini wrote:
> On 20/11/2015 09:57, Xiao Guangrong wrote:
>> You can move this patch to the front of
>> [PATCH 08/10] KVM: x86: MMU: Use for_each_rmap_spte macro instead of
>> pte_list_walk()
>>
>> By moving kvm_mmu_mark_parents_unsync() to after mmu_spte_set() (the
>> parent spte is present by then), you can directly clean up
>> for_each_rmap_spte().
>
> So basically squash together the two patches (8/10 and 9/10) except the
> change to kvm_mmu_mark_parents_unsync; then in the second patch switch
> from pte_list_walk to for_each_rmap_spte.
>
> That makes sense indeed.

Sorry for being late to respond to Xiao's suggestions.  I could not use
my development machine for a while this week.

In short, this kvm_mmu_mark_parents_unsync() call in kvm_mmu_get_page()
should have been mark_unsync() for the new parent_pte only, because we
are constructing the mappings from/to it, and the other parents in
sp->parent_ptes are not related to this fault?

As the code has been this way for some time, it is a bit scary to change
it, but I'll do some tests without that extra kvm_mmu_mark_parents_unsync()
with a guest (with ept=0) this afternoon.

Takuya
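To make the question concrete, here is a purely hypothetical sketch of the alternative Takuya describes; nothing like this was posted in the thread. With this patch applied, the new parent spte already gets mark_unsync() inside link_shadow_page(), so the question is whether the walk over the existing parents in kvm_mmu_get_page() is needed at all.

```c
	/* Current code in kvm_mmu_get_page(): mark every existing parent. */
	if (sp->unsync_children) {
		kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
		kvm_mmu_mark_parents_unsync(sp);
	} else if (sp->unsync) {
		kvm_mmu_mark_parents_unsync(sp);
	}

	/*
	 * Hypothetical alternative: rely on mark_unsync(sptep) in
	 * link_shadow_page() for the one parent spte created by this fault,
	 * and leave the unrelated parents in sp->parent_ptes untouched.
	 */
	if (sp->unsync_children)
		kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
```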
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 4e29d9a..b020323 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2107,14 +2107,9 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		if (sp->unsync_children) {
 			kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
 			kvm_mmu_mark_parents_unsync(sp);
-			if (parent_pte)
-				mark_unsync(parent_pte);
 		} else if (sp->unsync) {
 			kvm_mmu_mark_parents_unsync(sp);
-			if (parent_pte)
-				mark_unsync(parent_pte);
 		}
-		mmu_page_add_parent_pte(vcpu, sp, parent_pte);
 
 		__clear_sp_write_flooding_count(sp);
 		trace_kvm_mmu_get_page(sp, false);
@@ -2125,8 +2120,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 
 	sp = kvm_mmu_alloc_page(vcpu, direct);
 
-	mmu_page_add_parent_pte(vcpu, sp, parent_pte);
-
 	sp->gfn = gfn;
 	sp->role = role;
 	hlist_add_head(&sp->hash_link,
@@ -2194,7 +2187,8 @@ static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
 	return __shadow_walk_next(iterator, *iterator->sptep);
 }
 
-static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp)
+static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
+			     struct kvm_mmu_page *sp)
 {
 	u64 spte;
 
@@ -2205,6 +2199,11 @@ static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp)
 	       shadow_user_mask | shadow_x_mask | shadow_accessed_mask;
 
 	mmu_spte_set(sptep, spte);
+
+	if (sp->unsync_children || sp->unsync)
+		mark_unsync(sptep);
+
+	mmu_page_add_parent_pte(vcpu, sp, sptep);
 }
 
 static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
@@ -2263,11 +2262,6 @@ static void kvm_mmu_page_unlink_children(struct kvm *kvm,
 		mmu_page_zap_pte(kvm, sp, sp->spt + i);
 }
 
-static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
-{
-	mmu_page_remove_parent_pte(sp, parent_pte);
-}
-
 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	u64 *sptep;
@@ -2733,7 +2727,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, int write, int map_writable,
 					      iterator.level - 1,
 					      1, ACC_ALL, iterator.sptep);
 
-			link_shadow_page(iterator.sptep, sp);
+			link_shadow_page(vcpu, iterator.sptep, sp);
 		}
 	}
 	return emulate;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 11650ea..0dcf9c8 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -598,7 +598,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 			goto out_gpte_changed;
 
 		if (sp)
-			link_shadow_page(it.sptep, sp);
+			link_shadow_page(vcpu, it.sptep, sp);
 	}
 
 	for (;
@@ -618,7 +618,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 
 		sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1,
 				      true, direct_access, it.sptep);
-		link_shadow_page(it.sptep, sp);
+		link_shadow_page(vcpu, it.sptep, sp);
 	}
 
 	clear_sp_write_flooding_count(it.sptep);
@@ -629,8 +629,6 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 	return emulate;
 
 out_gpte_changed:
-	if (sp)
-		kvm_mmu_put_page(sp, it.sptep);
 	kvm_release_pfn_clean(pfn);
 	return 0;
 }
Every time kvm_mmu_get_page() is called with a non-NULL parent_pte
argument, link_shadow_page() follows that to set the parent entry so
that the new mapping will point to the returned page table.

Moving parent_pte handling there allows us to clean up the code because
parent_pte is passed to kvm_mmu_get_page() just for mark_unsync() and
mmu_page_add_parent_pte().

Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
---
 arch/x86/kvm/mmu.c         | 22 ++++++++--------------
 arch/x86/kvm/paging_tmpl.h |  6 ++----
 2 files changed, 10 insertions(+), 18 deletions(-)
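For context, this is the shape of link_shadow_page() after the patch, pieced together from the hunks above; the spte-composition line at the top is assumed from the mmu.c of that era and is not shown in the diff context, so treat this as a sketch rather than the exact resulting code.

```c
static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
			     struct kvm_mmu_page *sp)
{
	u64 spte;

	/* Assumed context: build the non-leaf spte that points at sp->spt. */
	spte = __pa(sp->spt) | PT_PRESENT_MASK | PT_WRITABLE_MASK |
	       shadow_user_mask | shadow_x_mask | shadow_accessed_mask;

	/* Make the parent entry present first... */
	mmu_spte_set(sptep, spte);

	/* ...so the new parent spte can be marked unsync right away... */
	if (sp->unsync_children || sp->unsync)
		mark_unsync(sptep);

	/* ...and added to the child's parent_ptes list while present. */
	mmu_page_add_parent_pte(vcpu, sp, sptep);
}
```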