diff mbox series

[v2,08/15] KVM: x86/tdp_mmu: Introduce KVM MMU root types to specify page table type

Message ID 20240530210714.364118-9-rick.p.edgecombe@intel.com (mailing list archive)
State New, archived
Headers show
Series TDX MMU prep series part 1 | expand

Commit Message

Rick Edgecombe May 30, 2024, 9:07 p.m. UTC
From: Isaku Yamahata <isaku.yamahata@intel.com>

Define an enum kvm_tdp_mmu_root_types to specify the KVM MMU root type [1]
so that the iterators over root page tables can consistently filter roots
by type instead of by the bool only_valid.

TDX KVM will operate on KVM page tables of specified types: shared page
tables, private page tables, or both.  Introduce an enum instead of the
bool only_valid so that the set of page table types can easily be extended
to cover shared, private, or both, in addition to valid or not.  Replace
only_valid=false with KVM_ANY_ROOTS and only_valid=true with
KVM_ANY_VALID_ROOTS.  Use KVM_ANY_ROOTS and KVM_ANY_VALID_ROOTS to wrap
KVM_VALID_ROOTS to avoid further code churn when direct vs mirror root
concepts are introduced in future patches.

Link: https://lore.kernel.org/kvm/ZivazWQw1oCU8VBC@google.com/ [1]
Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
---
TDX MMU Prep:
 - Newly introduced.
---
 arch/x86/kvm/mmu/tdp_mmu.c | 39 +++++++++++++++++++-------------------
 arch/x86/kvm/mmu/tdp_mmu.h |  7 +++++++
 2 files changed, 27 insertions(+), 19 deletions(-)

Comments

Paolo Bonzini June 7, 2024, 8:10 a.m. UTC | #1
On Thu, May 30, 2024 at 11:07 PM Rick Edgecombe
<rick.p.edgecombe@intel.com> wrote:
> +enum kvm_tdp_mmu_root_types {
> +       KVM_VALID_ROOTS = BIT(0),
> +
> +       KVM_ANY_ROOTS = 0,
> +       KVM_ANY_VALID_ROOTS = KVM_VALID_ROOTS,

I would instead define it as

    KVM_INVALID_ROOTS = BIT(0),
    KVM_VALID_ROOTS = BIT(1),
    KVM_ALL_ROOTS = KVM_VALID_ROOTS | KVM_INVALID_ROOTS,

and then

  if (root->role.invalid)
    return types & KVM_INVALID_ROOTS;
  else
    return types & KVM_VALID_ROOTS;

Then in the next patch you can do

     KVM_INVALID_ROOTS = BIT(0),
-    KVM_VALID_ROOTS = BIT(1),
+   KVM_DIRECT_ROOTS = BIT(1),
+   KVM_MIRROR_ROOTS = BIT(2),
+   KVM_VALID_ROOTS = KVM_DIRECT_ROOTS | KVM_MIRROR_ROOTS,
     KVM_ALL_ROOTS = KVM_VALID_ROOTS | KVM_INVALID_ROOTS,

and likewise

  if (root->role.invalid)
    return types & KVM_INVALID_ROOTS;
  else if (likely(!is_mirror_sp(root)))
    return types & KVM_DIRECT_ROOTS;
  else
    return types & KVM_MIRROR_ROOTS;

This removes the need for KVM_ANY_VALID_ROOTS (btw I don't know if
it's me, but ALL sounds more grammatical than ANY in this context). So
the resulting code should be a bit clearer.

Apart from this small tweak, the overall idea is really good.

Paolo

> +};
> +
>  bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush);
>  bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
>  void kvm_tdp_mmu_zap_all(struct kvm *kvm);
> --
> 2.34.1
>
Rick Edgecombe June 7, 2024, 8:06 p.m. UTC | #2
On Fri, 2024-06-07 at 10:10 +0200, Paolo Bonzini wrote:
> On Thu, May 30, 2024 at 11:07 PM Rick Edgecombe
> <rick.p.edgecombe@intel.com> wrote:
> > +enum kvm_tdp_mmu_root_types {
> > +       KVM_VALID_ROOTS = BIT(0),
> > +
> > +       KVM_ANY_ROOTS = 0,
> > +       KVM_ANY_VALID_ROOTS = KVM_VALID_ROOTS,
> 
> I would instead define it as
> 
>     KVM_INVALID_ROOTS = BIT(0),
>     KVM_VALID_ROOTS = BIT(1),
>     KVM_ALL_ROOTS = KVM_VALID_ROOTS | KVM_INVALID_ROOTS,
> 
> and then
> 
>   if (root->role.invalid)
>     return types & KVM_INVALID_ROOTS;
>   else
>     return types & KVM_VALID_ROOTS;
> 
> Then in the next patch you can do
> 
>      KVM_INVALID_ROOTS = BIT(0),
> -    KVM_VALID_ROOTS = BIT(1),
> +   KVM_DIRECT_ROOTS = BIT(1),
> +   KVM_MIRROR_ROOTS = BIT(2),
> +   KVM_VALID_ROOTS = KVM_DIRECT_ROOTS | KVM_MIRROR_ROOTS,
>      KVM_ALL_ROOTS = KVM_VALID_ROOTS | KVM_INVALID_ROOTS,
> 
> and likewise
> 
>   if (root->role.invalid)
>     return types & KVM_INVALID_ROOTS;
>   else if (likely(!is_mirror_sp(root)))
>     return types & KVM_DIRECT_ROOTS;
>   else
>     return types & KVM_MIRROR_ROOTS;
> 
> This removes the need for KVM_ANY_VALID_ROOTS (btw I don't know if
> it's me, but ALL sounds more grammatical than ANY in this context). So
> the resulting code should be a bit clearer.
> 
> Apart from this small tweak, the overall idea is really good.

Yes, this makes more sense. Thanks.
diff mbox series

Patch

diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index d49abf1e3f37..5e8f652cd8b1 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -92,9 +92,10 @@  void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
 	call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
 }
 
-static bool tdp_mmu_root_match(struct kvm_mmu_page *root, bool only_valid)
+static bool tdp_mmu_root_match(struct kvm_mmu_page *root,
+			       enum kvm_tdp_mmu_root_types types)
 {
-	if (only_valid && root->role.invalid)
+	if ((types & KVM_VALID_ROOTS) && root->role.invalid)
 		return false;
 
 	return true;
@@ -102,17 +103,17 @@  static bool tdp_mmu_root_match(struct kvm_mmu_page *root, bool only_valid)
 
 /*
  * Returns the next root after @prev_root (or the first root if @prev_root is
- * NULL).  A reference to the returned root is acquired, and the reference to
- * @prev_root is released (the caller obviously must hold a reference to
- * @prev_root if it's non-NULL).
+ * NULL) that matches with @types.  A reference to the returned root is
+ * acquired, and the reference to @prev_root is released (the caller obviously
+ * must hold a reference to @prev_root if it's non-NULL).
  *
- * If @only_valid is true, invalid roots are skipped.
+ * Roots that don't match with @types are skipped.
  *
  * Returns NULL if the end of tdp_mmu_roots was reached.
  */
 static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
 					      struct kvm_mmu_page *prev_root,
-					      bool only_valid)
+					      enum kvm_tdp_mmu_root_types types)
 {
 	struct kvm_mmu_page *next_root;
 
@@ -133,7 +134,7 @@  static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
 						   typeof(*next_root), link);
 
 	while (next_root) {
-		if (tdp_mmu_root_match(next_root, only_valid) &&
+		if (tdp_mmu_root_match(next_root, types) &&
 		    kvm_tdp_mmu_get_root(next_root))
 			break;
 
@@ -158,20 +159,20 @@  static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
  * If shared is set, this function is operating under the MMU lock in read
  * mode.
  */
-#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _only_valid)	\
-	for (_root = tdp_mmu_next_root(_kvm, NULL, _only_valid);		\
+#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _types)	\
+	for (_root = tdp_mmu_next_root(_kvm, NULL, _types);		\
 	     ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root;		\
-	     _root = tdp_mmu_next_root(_kvm, _root, _only_valid))		\
+	     _root = tdp_mmu_next_root(_kvm, _root, _types))		\
 		if (_as_id >= 0 && kvm_mmu_page_as_id(_root) != _as_id) {	\
 		} else
 
 #define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)	\
-	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, true)
+	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, KVM_ANY_VALID_ROOTS)
 
 #define for_each_tdp_mmu_root_yield_safe(_kvm, _root)			\
-	for (_root = tdp_mmu_next_root(_kvm, NULL, false);		\
+	for (_root = tdp_mmu_next_root(_kvm, NULL, KVM_ANY_ROOTS);		\
 	     ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root;	\
-	     _root = tdp_mmu_next_root(_kvm, _root, false))
+	     _root = tdp_mmu_next_root(_kvm, _root, KVM_ANY_ROOTS))
 
 /*
  * Iterate over all TDP MMU roots.  Requires that mmu_lock be held for write,
@@ -180,18 +181,18 @@  static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
  * Holding mmu_lock for write obviates the need for RCU protection as the list
  * is guaranteed to be stable.
  */
-#define __for_each_tdp_mmu_root(_kvm, _root, _as_id, _only_valid)		\
+#define __for_each_tdp_mmu_root(_kvm, _root, _as_id, _types)			\
 	list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)		\
 		if (kvm_lockdep_assert_mmu_lock_held(_kvm, false) &&		\
 		    ((_as_id >= 0 && kvm_mmu_page_as_id(_root) != _as_id) ||	\
-		     !tdp_mmu_root_match((_root), (_only_valid)))) {		\
+		     !tdp_mmu_root_match((_root), (_types)))) {			\
 		} else
 
 #define for_each_tdp_mmu_root(_kvm, _root, _as_id)			\
-	__for_each_tdp_mmu_root(_kvm, _root, _as_id, false)
+	__for_each_tdp_mmu_root(_kvm, _root, _as_id, KVM_ANY_ROOTS)
 
 #define for_each_valid_tdp_mmu_root(_kvm, _root, _as_id)		\
-	__for_each_tdp_mmu_root(_kvm, _root, _as_id, true)
+	__for_each_tdp_mmu_root(_kvm, _root, _as_id, KVM_ANY_VALID_ROOTS)
 
 static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)
 {
@@ -1196,7 +1197,7 @@  bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
 {
 	struct kvm_mmu_page *root;
 
-	__for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false)
+	__for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, KVM_ANY_ROOTS)
 		flush = tdp_mmu_zap_leafs(kvm, root, range->start, range->end,
 					  range->may_block, flush);
 
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index 437ddd4937a9..e7055a5333a8 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -19,6 +19,13 @@  __must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
 
 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root);
 
+enum kvm_tdp_mmu_root_types {
+	KVM_VALID_ROOTS = BIT(0),
+
+	KVM_ANY_ROOTS = 0,
+	KVM_ANY_VALID_ROOTS = KVM_VALID_ROOTS,
+};
+
 bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush);
 bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
 void kvm_tdp_mmu_zap_all(struct kvm *kvm);