diff mbox series

[RFC,02/15] KVM: x86/mmu: Rename __rmap_write_protect to rmap_write_protect

Message ID 20211119235759.1304274-3-dmatlack@google.com (mailing list archive)
State New, archived
Headers show
Series KVM: x86/mmu: Eager Page Splitting for the TDP MMU | expand

Commit Message

David Matlack Nov. 19, 2021, 11:57 p.m. UTC
Now that the old rmap_write_protect has been renamed away, its name is
free, so the double underscores in front of __rmap_write_protect are no
longer needed to avoid a collision.

No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
---
 arch/x86/kvm/mmu/mmu.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

Comments

Ben Gardon Nov. 22, 2021, 6:52 p.m. UTC | #1
On Fri, Nov 19, 2021 at 3:58 PM David Matlack <dmatlack@google.com> wrote:
>
> Now that rmap_write_protect has been renamed, there is no need for the
> double underscores in front of __rmap_write_protect.
>
> No functional change intended.
>
> Signed-off-by: David Matlack <dmatlack@google.com>

Reviewed-by: Ben Gardon <bgardon@google.com>


> ---
>  arch/x86/kvm/mmu/mmu.c | 12 ++++++------
>  1 file changed, 6 insertions(+), 6 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 16ffb571bc75..1146f87044a6 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -1235,9 +1235,9 @@ static bool spte_write_protect(u64 *sptep, bool pt_protect)
>         return mmu_spte_update(sptep, spte);
>  }
>
> -static bool __rmap_write_protect(struct kvm *kvm,
> -                                struct kvm_rmap_head *rmap_head,
> -                                bool pt_protect)
> +static bool rmap_write_protect(struct kvm *kvm,
> +                              struct kvm_rmap_head *rmap_head,
> +                              bool pt_protect)
>  {
>         u64 *sptep;
>         struct rmap_iterator iter;
> @@ -1317,7 +1317,7 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
>         while (mask) {
>                 rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
>                                         PG_LEVEL_4K, slot);
> -               __rmap_write_protect(kvm, rmap_head, false);
> +               rmap_write_protect(kvm, rmap_head, false);
>
>                 /* clear the first set bit */
>                 mask &= mask - 1;
> @@ -1416,7 +1416,7 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
>         if (kvm_memslots_have_rmaps(kvm)) {
>                 for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
>                         rmap_head = gfn_to_rmap(gfn, i, slot);
> -                       write_protected |= __rmap_write_protect(kvm, rmap_head, true);
> +                       write_protected |= rmap_write_protect(kvm, rmap_head, true);
>                 }
>         }
>
> @@ -5780,7 +5780,7 @@ static bool slot_rmap_write_protect(struct kvm *kvm,
>                                     struct kvm_rmap_head *rmap_head,
>                                     const struct kvm_memory_slot *slot)
>  {
> -       return __rmap_write_protect(kvm, rmap_head, false);
> +       return rmap_write_protect(kvm, rmap_head, false);
>  }
>
>  void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
> --
> 2.34.0.rc2.393.gf8c9666880-goog
>
Peter Xu Nov. 26, 2021, 12:18 p.m. UTC | #2
On Fri, Nov 19, 2021 at 11:57:46PM +0000, David Matlack wrote:
> Now that rmap_write_protect has been renamed, there is no need for the
> double underscores in front of __rmap_write_protect.
> 
> No functional change intended.
> 
> Signed-off-by: David Matlack <dmatlack@google.com>

Reviewed-by: Peter Xu <peterx@redhat.com>
diff mbox series

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 16ffb571bc75..1146f87044a6 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1235,9 +1235,9 @@  static bool spte_write_protect(u64 *sptep, bool pt_protect)
 	return mmu_spte_update(sptep, spte);
 }
 
-static bool __rmap_write_protect(struct kvm *kvm,
-				 struct kvm_rmap_head *rmap_head,
-				 bool pt_protect)
+static bool rmap_write_protect(struct kvm *kvm,
+			       struct kvm_rmap_head *rmap_head,
+			       bool pt_protect)
 {
 	u64 *sptep;
 	struct rmap_iterator iter;
@@ -1317,7 +1317,7 @@  static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
 	while (mask) {
 		rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
 					PG_LEVEL_4K, slot);
-		__rmap_write_protect(kvm, rmap_head, false);
+		rmap_write_protect(kvm, rmap_head, false);
 
 		/* clear the first set bit */
 		mask &= mask - 1;
@@ -1416,7 +1416,7 @@  bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
 	if (kvm_memslots_have_rmaps(kvm)) {
 		for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
 			rmap_head = gfn_to_rmap(gfn, i, slot);
-			write_protected |= __rmap_write_protect(kvm, rmap_head, true);
+			write_protected |= rmap_write_protect(kvm, rmap_head, true);
 		}
 	}
 
@@ -5780,7 +5780,7 @@  static bool slot_rmap_write_protect(struct kvm *kvm,
 				    struct kvm_rmap_head *rmap_head,
 				    const struct kvm_memory_slot *slot)
 {
-	return __rmap_write_protect(kvm, rmap_head, false);
+	return rmap_write_protect(kvm, rmap_head, false);
 }
 
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm,