
KVM: x86/mmu: avoid accidentally going down the shadow path for a zero-count TDP MMU root

Message ID 20221110034122.9892-1-yan.y.zhao@intel.com
State New, archived
Series KVM: x86/mmu: avoid accidentally going down the shadow path for a zero-count TDP MMU root

Commit Message

Yan Zhao Nov. 10, 2022, 3:41 a.m. UTC
The KVM MMU uses "if (is_tdp_mmu(vcpu->arch.mmu))" to choose between the
TDP MMU path and the shadow MMU path.
If the root is a TDP MMU page but its root_count is 0, it is not valid to
fall back to the shadow path.

So, return true in that case and add a WARN_ON() for the zero root count.

Signed-off-by: Yan Zhao <yan.y.zhao@intel.com>
---
 arch/x86/kvm/mmu/tdp_mmu.h | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

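For context, below is a minimal sketch of the caller-side pattern this check
guards, loosely modelled on direct_page_fault() in arch/x86/kvm/mmu/mmu.c of
this era. It is simplified for illustration and is not the exact upstream
code: the function name example_direct_page_fault() is made up, while
kvm_tdp_mmu_map() and __direct_map() are the real mapping helpers of that
kernel version, with locking and retry handling omitted.

	/*
	 * Illustrative sketch only (not upstream code). It shows why
	 * is_tdp_mmu() returning false for a root that was actually built
	 * by the TDP MMU would be harmful: the fault would be routed down
	 * the shadow MMU mapping path instead of the TDP MMU one.
	 */
	static int example_direct_page_fault(struct kvm_vcpu *vcpu,
					     struct kvm_page_fault *fault)
	{
		bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu);
		int r;

		if (is_tdp_mmu_fault)
			r = kvm_tdp_mmu_map(vcpu, fault);	/* TDP MMU path */
		else
			r = __direct_map(vcpu, fault);		/* shadow MMU path */

		return r;
	}
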
Comments

Sean Christopherson Nov. 10, 2022, 4:54 p.m. UTC | #1
On Thu, Nov 10, 2022, Yan Zhao wrote:
> The KVM MMU uses "if (is_tdp_mmu(vcpu->arch.mmu))" to choose between the
> TDP MMU path and the shadow MMU path.
> If the root is a TDP MMU page but its root_count is 0, it is not valid to
> fall back to the shadow path.
> 
> So, return true in that case and add a WARN_ON() for the zero root count.
> 
> Signed-off-by: Yan Zhao <yan.y.zhao@intel.com>
> ---
>  arch/x86/kvm/mmu/tdp_mmu.h | 6 +++++-
>  1 file changed, 5 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
> index c163f7cc23ca..58b4881654a9 100644
> --- a/arch/x86/kvm/mmu/tdp_mmu.h
> +++ b/arch/x86/kvm/mmu/tdp_mmu.h
> @@ -74,6 +74,7 @@ static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
>  {
>  	struct kvm_mmu_page *sp;
>  	hpa_t hpa = mmu->root.hpa;
> +	bool is_tdp;
>  
>  	if (WARN_ON(!VALID_PAGE(hpa)))
>  		return false;
> @@ -84,7 +85,10 @@ static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
>  	 * pae_root page, not a shadow page.
>  	 */
>  	sp = to_shadow_page(hpa);
> -	return sp && is_tdp_mmu_page(sp) && sp->root_count;
> +	is_tdp = sp && is_tdp_mmu_page(sp);
> +	WARN_ON(is_tdp && !refcount_read(&sp->tdp_mmu_root_count));
> +
> +	return is_tdp;

I have a series/patch that drops this code entirely; I would rather just go that
route directly.

https://lore.kernel.org/all/20221012181702.3663607-9-seanjc@google.com
Yan Zhao Nov. 11, 2022, 1:40 a.m. UTC | #2
On Thu, Nov 10, 2022 at 04:54:20PM +0000, Sean Christopherson wrote:
> On Thu, Nov 10, 2022, Yan Zhao wrote:
> > The KVM MMU uses "if (is_tdp_mmu(vcpu->arch.mmu))" to choose between the
> > TDP MMU path and the shadow MMU path.
> > If the root is a TDP MMU page but its root_count is 0, it is not valid to
> > fall back to the shadow path.
> > 
> > So, return true in that case and add a WARN_ON() for the zero root count.
> > 
> > Signed-off-by: Yan Zhao <yan.y.zhao@intel.com>
> > ---
> >  arch/x86/kvm/mmu/tdp_mmu.h | 6 +++++-
> >  1 file changed, 5 insertions(+), 1 deletion(-)
> > 
> > diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
> > index c163f7cc23ca..58b4881654a9 100644
> > --- a/arch/x86/kvm/mmu/tdp_mmu.h
> > +++ b/arch/x86/kvm/mmu/tdp_mmu.h
> > @@ -74,6 +74,7 @@ static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
> >  {
> >  	struct kvm_mmu_page *sp;
> >  	hpa_t hpa = mmu->root.hpa;
> > +	bool is_tdp;
> >  
> >  	if (WARN_ON(!VALID_PAGE(hpa)))
> >  		return false;
> > @@ -84,7 +85,10 @@ static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
> >  	 * pae_root page, not a shadow page.
> >  	 */
> >  	sp = to_shadow_page(hpa);
> > -	return sp && is_tdp_mmu_page(sp) && sp->root_count;
> > +	is_tdp = sp && is_tdp_mmu_page(sp);
> > +	WARN_ON(is_tdp && !refcount_read(&sp->tdp_mmu_root_count));
> > +
> > +	return is_tdp;
> 
> I have a series/patch that drops this code entirely; I would rather just go that
> route directly.
> 
> https://lore.kernel.org/all/20221012181702.3663607-9-seanjc@google.com
Thanks for pointing me to this link.
Yes, it's a much better fix!

Thanks
Yan

Patch

diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index c163f7cc23ca..58b4881654a9 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -74,6 +74,7 @@ static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
 {
 	struct kvm_mmu_page *sp;
 	hpa_t hpa = mmu->root.hpa;
+	bool is_tdp;
 
 	if (WARN_ON(!VALID_PAGE(hpa)))
 		return false;
@@ -84,7 +85,10 @@ static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
 	 * pae_root page, not a shadow page.
 	 */
 	sp = to_shadow_page(hpa);
-	return sp && is_tdp_mmu_page(sp) && sp->root_count;
+	is_tdp = sp && is_tdp_mmu_page(sp);
+	WARN_ON(is_tdp && !refcount_read(&sp->tdp_mmu_root_count));
+
+	return is_tdp;
 }
 #else
 static inline int kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return 0; }