
[v2,1/9] KVM: x86/mmu: Move implementation of make_spte to a helper

Message ID 20220321224358.1305530-2-bgardon@google.com (mailing list archive)
State New, archived
Series KVM: x86/MMU: Optimize disabling dirty logging

Commit Message

Ben Gardon March 21, 2022, 10:43 p.m. UTC
Move the implementation of make_spte() to a new helper, __make_spte(),
leaving make_spte() as a thin wrapper. This will facilitate factoring
all uses of the vCPU pointer out of the helper in subsequent commits.

No functional change intended.

Signed-off-by: Ben Gardon <bgardon@google.com>
---
 arch/x86/kvm/mmu/spte.c | 19 ++++++++++++++-----
 arch/x86/kvm/mmu/spte.h |  4 ++++
 2 files changed, 18 insertions(+), 5 deletions(-)
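
Note for readers following the series: the point of the split is that
make_spte() keeps its current signature as a thin wrapper while the real
work lives in __make_spte(), whose parameter list can then be changed
(for example, to stop taking a vCPU pointer) without churning every
existing caller. The standalone sketch below illustrates the pattern;
the names (struct ctx, __compute_value(), compute_value()) are
hypothetical and are not part of the KVM sources.

/*
 * Standalone illustration of the wrapper pattern used in this patch.
 * The names here are made up for the example and only mirror the shape
 * of __make_spte()/make_spte().
 */
#include <stdio.h>

struct ctx {
	int base;	/* stands in for per-vCPU state */
};

/*
 * Inner helper: takes every input explicitly, so its parameter list can
 * later be changed (e.g. to drop the context pointer) without touching
 * existing callers of the wrapper below.
 */
static int __compute_value(struct ctx *c, int offset)
{
	return c->base + offset;
}

/* Thin wrapper preserving the original signature for current callers. */
static int compute_value(struct ctx *c, int offset)
{
	return __compute_value(c, offset);
}

int main(void)
{
	struct ctx c = { .base = 40 };

	printf("%d\n", compute_value(&c, 2));	/* prints 42 */
	return 0;
}

In the KVM patch, make_spte() plays the role of compute_value() and
__make_spte() the role of __compute_value().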

Patch

diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index 4739b53c9734..d3da0d3d41cb 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -90,11 +90,10 @@  static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
 				     E820_TYPE_RAM);
 }
 
-bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
-	       const struct kvm_memory_slot *slot,
-	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
-	       u64 old_spte, bool prefetch, bool can_unsync,
-	       bool host_writable, u64 *new_spte)
+bool __make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+		 const struct kvm_memory_slot *slot, unsigned int pte_access,
+		 gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool prefetch,
+		 bool can_unsync, bool host_writable, u64 *new_spte)
 {
 	int level = sp->role.level;
 	u64 spte = SPTE_MMU_PRESENT_MASK;
@@ -192,6 +191,16 @@  bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	return wrprot;
 }
 
+bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+	       const struct kvm_memory_slot *slot,
+	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
+	       u64 old_spte, bool prefetch, bool can_unsync,
+	       bool host_writable, u64 *new_spte)
+{
+	return __make_spte(vcpu, sp, slot, pte_access, gfn, pfn, old_spte,
+			   prefetch, can_unsync, host_writable, new_spte);
+}
+
 static u64 make_spte_executable(u64 spte)
 {
 	bool is_access_track = is_access_track_spte(spte);
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index 73f12615416f..3fae3c3124f7 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -410,6 +410,10 @@  static inline u64 get_mmio_spte_generation(u64 spte)
 	return gen;
 }
 
+bool __make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+		 const struct kvm_memory_slot *slot, unsigned int pte_access,
+		 gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool prefetch,
+		 bool can_unsync, bool host_writable, u64 *new_spte);
 bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	       const struct kvm_memory_slot *slot,
 	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,