
[2/5] KVM: x86/mmu: Fix wrong gfn range of tlb flushing in kvm_set_pte_rmapp()

Message ID a92b4b56116f0f71ffceab2b4ff3c03f47fd468f.1656039275.git.houwenlong.hwl@antgroup.com (mailing list archive)
State New, archived
Series Fix wrong gfn range of tlb flushing with range

Commit Message

Hou Wenlong June 24, 2022, 3:36 a.m. UTC
When the spte of a huge page is dropped in kvm_set_pte_rmapp(),
the whole gfn range covered by that spte should be flushed.
However, rmap_walk_init_level() doesn't align the gfn down for
the new level the way the TDP iterator does, so the gfn used in
kvm_set_pte_rmapp() is not the base gfn of the huge page, and
the size of the flushed range is wrong for a huge page as well.
Since the base gfn of the huge page is more meaningful during
the rmap walk, align the gfn down for the new level and use the
huge page size as the range for TLB flushing in
kvm_set_pte_rmapp().

Fixes: c3134ce240eed ("KVM: Replace old tlb flush function with new one to flush a specified range.")
Signed-off-by: Hou Wenlong <houwenlong.hwl@antgroup.com>
---
 arch/x86/kvm/mmu/mmu.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
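
The fix relies on KVM_PAGES_PER_HPAGE(level) being a power of two, so
"& -KVM_PAGES_PER_HPAGE(level)" masks off the low-order bits and yields
the base gfn of the huge page at that level. A minimal sketch of the
arithmetic, using a hypothetical helper name that is not part of the patch:

	/*
	 * KVM_PAGES_PER_HPAGE(level) is a power of two (512 for a 2M page
	 * with 4K base pages), so negating it produces a mask that clears
	 * the offset within the huge page.
	 */
	static inline gfn_t hugepage_base_gfn(gfn_t gfn, int level)
	{
		return gfn & -KVM_PAGES_PER_HPAGE(level);
	}

For example, at level 2 (2M pages) gfn 0x12345 aligns down to 0x12200, and
the flush then covers KVM_PAGES_PER_HPAGE(2) = 512 pages instead of 1.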

Comments

Sean Christopherson June 24, 2022, 10:53 p.m. UTC | #1
On Fri, Jun 24, 2022, Hou Wenlong wrote:
> When the spte of a huge page is dropped in kvm_set_pte_rmapp(),
> the whole gfn range covered by that spte should be flushed.
> However, rmap_walk_init_level() doesn't align the gfn down for
> the new level the way the TDP iterator does, so the gfn used in
> kvm_set_pte_rmapp() is not the base gfn of the huge page, and
> the size of the flushed range is wrong for a huge page as well.
> Since the base gfn of the huge page is more meaningful during
> the rmap walk, align the gfn down for the new level and use the
> huge page size as the range for TLB flushing in
> kvm_set_pte_rmapp().

It's also worth noting that kvm_set_pte_rmapp() is the only user of the rmap
iterators that consumes @gfn, i.e. modifying iterator->gfn is safe-ish.
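
For context, this is roughly how the rmap walk hands @gfn to the handler in
this era of the code (a simplified sketch, not the full function):

	/*
	 * Simplified from kvm_handle_gfn_range(): iterator.gfn is passed
	 * straight through to the handler, and kvm_set_pte_rmapp() is the
	 * only handler that actually reads it.
	 */
	for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
				 range->start, range->end - 1, &iterator)
		ret |= handler(kvm, iterator.rmap, range->slot, iterator.gfn,
			       iterator.level, range->pte);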

> Fixes: c3134ce240eed ("KVM: Replace old tlb flush function with new one to flush a specified range.")
> Signed-off-by: Hou Wenlong <houwenlong.hwl@antgroup.com>
> ---
>  arch/x86/kvm/mmu/mmu.c | 4 ++--
>  1 file changed, 2 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index b8a1f5b46b9d..37bfc88ea212 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -1427,7 +1427,7 @@ static bool kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
>  	}
>  
>  	if (need_flush && kvm_available_flush_tlb_with_range()) {
> -		kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
> +		kvm_flush_remote_tlbs_with_address(kvm, gfn, KVM_PAGES_PER_HPAGE(level));
>  		return false;
>  	}
>  
> @@ -1455,7 +1455,7 @@ static void
>  rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
>  {
>  	iterator->level = level;
> -	iterator->gfn = iterator->start_gfn;
> +	iterator->gfn = iterator->start_gfn & -KVM_PAGES_PER_HPAGE(level);

Hrm, arguably this should be done on start_gfn in slot_rmap_walk_init().  Having iter->gfn
be less than iter->start_gfn would be odd.

>  	iterator->rmap = gfn_to_rmap(iterator->gfn, level, iterator->slot);
>  	iterator->end_rmap = gfn_to_rmap(iterator->end_gfn, level, iterator->slot);
>  }
> -- 
> 2.31.1
>
Hou Wenlong June 27, 2022, 4 a.m. UTC | #2
On Sat, Jun 25, 2022 at 06:53:25AM +0800, Sean Christopherson wrote:
> On Fri, Jun 24, 2022, Hou Wenlong wrote:
> > When the spte of a huge page is dropped in kvm_set_pte_rmapp(),
> > the whole gfn range covered by that spte should be flushed.
> > However, rmap_walk_init_level() doesn't align the gfn down for
> > the new level the way the TDP iterator does, so the gfn used in
> > kvm_set_pte_rmapp() is not the base gfn of the huge page, and
> > the size of the flushed range is wrong for a huge page as well.
> > Since the base gfn of the huge page is more meaningful during
> > the rmap walk, align the gfn down for the new level and use the
> > huge page size as the range for TLB flushing in
> > kvm_set_pte_rmapp().
> 
> It's also worth noting that kvm_set_pte_rmapp() is the only user of the rmap
> iterators that consumes @gfn, i.e. modifying iterator->gfn is safe-ish.
>
> > Fixes: c3134ce240eed ("KVM: Replace old tlb flush function with new one to flush a specified range.")
> > Signed-off-by: Hou Wenlong <houwenlong.hwl@antgroup.com>
> > ---
> >  arch/x86/kvm/mmu/mmu.c | 4 ++--
> >  1 file changed, 2 insertions(+), 2 deletions(-)
> > 
> > diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> > index b8a1f5b46b9d..37bfc88ea212 100644
> > --- a/arch/x86/kvm/mmu/mmu.c
> > +++ b/arch/x86/kvm/mmu/mmu.c
> > @@ -1427,7 +1427,7 @@ static bool kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
> >  	}
> >  
> >  	if (need_flush && kvm_available_flush_tlb_with_range()) {
> > -		kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
> > +		kvm_flush_remote_tlbs_with_address(kvm, gfn, KVM_PAGES_PER_HPAGE(level));
> >  		return false;
> >  	}
> >  
> > @@ -1455,7 +1455,7 @@ static void
> >  rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
> >  {
> >  	iterator->level = level;
> > -	iterator->gfn = iterator->start_gfn;
> > +	iterator->gfn = iterator->start_gfn & -KVM_PAGES_PER_HPAGE(level);
> 
> Hrm, arguably this should be done on start_gfn in slot_rmap_walk_init().  Having iter->gfn
> be less than iter->start_gfn would be odd.
>
Hrm, iter->gfn may end up bigger than iter->end_gfn in slot_rmap_walk_next(), which would
also be odd. I just think it may be better to pass the base gfn of the huge page. However,
as you said, kvm_set_pte_rmapp() is the only user of the rmap iterator that consumes @gfn,
so modifying it only in that function is enough.
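
A minimal sketch of that alternative (an assumption about what "modifying it
only in that function" could look like, not a posted patch): align the gfn at
the flush site inside kvm_set_pte_rmapp() and leave the iterator untouched:

	if (need_flush && kvm_available_flush_tlb_with_range()) {
		/* Flush the whole huge page, starting from its base gfn. */
		kvm_flush_remote_tlbs_with_address(kvm,
				gfn & -KVM_PAGES_PER_HPAGE(level),
				KVM_PAGES_PER_HPAGE(level));
		return false;
	}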

> >  	iterator->rmap = gfn_to_rmap(iterator->gfn, level, iterator->slot);
> >  	iterator->end_rmap = gfn_to_rmap(iterator->end_gfn, level, iterator->slot);
> >  }
> > -- 
> > 2.31.1
> >

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index b8a1f5b46b9d..37bfc88ea212 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1427,7 +1427,7 @@  static bool kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
 	}
 
 	if (need_flush && kvm_available_flush_tlb_with_range()) {
-		kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
+		kvm_flush_remote_tlbs_with_address(kvm, gfn, KVM_PAGES_PER_HPAGE(level));
 		return false;
 	}
 
@@ -1455,7 +1455,7 @@  static void
 rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
 {
 	iterator->level = level;
-	iterator->gfn = iterator->start_gfn;
+	iterator->gfn = iterator->start_gfn & -KVM_PAGES_PER_HPAGE(level);
 	iterator->rmap = gfn_to_rmap(iterator->gfn, level, iterator->slot);
 	iterator->end_rmap = gfn_to_rmap(iterator->end_gfn, level, iterator->slot);
 }