
[RFC,v1,15/26] KVM: arm64: Rename kvm_pinned_page to kvm_guest_page

Message ID 20240222161047.402609-16-tabba@google.com (mailing list archive)
State New, archived
Series KVM: Restricted mapping of guest_memfd at the host and pKVM/arm64 support

Commit Message

Fuad Tabba Feb. 22, 2024, 4:10 p.m. UTC
With guestmem, guest pages are no longer pinned by the host. Rename
struct kvm_pinned_page to struct kvm_guest_page to reflect that.

No functional change intended.

Signed-off-by: Fuad Tabba <tabba@google.com>
---
 arch/arm64/include/asm/kvm_host.h |  2 +-
 arch/arm64/kvm/mmu.c              | 12 ++++++------
 arch/arm64/kvm/pkvm.c             | 10 +++++-----
 3 files changed, 12 insertions(+), 12 deletions(-)
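
For context: pKVM tracks every page mapped into a protected guest in an
rb-tree keyed by its IPA (intermediate physical address); this patch only
renames the node type. Below is a minimal sketch of the insertion side of
that pattern against the kernel's <linux/rbtree.h> API; track_guest_page()
is an illustrative name, not a helper introduced by this series:

#include <linux/rbtree.h>

/* As renamed by this patch: one tracking node per page given to the guest. */
struct kvm_guest_page {
	struct rb_node	node;
	struct page	*page;
	u64		ipa;	/* key: intermediate physical address */
};

/* Node-vs-node comparator used for insertion (cf. cmp_ppages() in mmu.c). */
static int guest_page_cmp(struct rb_node *node, const struct rb_node *parent)
{
	struct kvm_guest_page *a = rb_entry(node, struct kvm_guest_page, node);
	struct kvm_guest_page *b = rb_entry(parent, struct kvm_guest_page, node);

	if (a->ipa < b->ipa)
		return -1;
	if (a->ipa > b->ipa)
		return 1;
	return 0;
}

/*
 * Illustrative helper: rb_find_add() returns the already-present node on
 * a key collision, which insert_ppage() below maps to -EEXIST.
 */
static int track_guest_page(struct rb_root *root, struct kvm_guest_page *gpage)
{
	if (rb_find_add(&gpage->node, root, guest_page_cmp))
		return -EEXIST;
	return 0;
}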

Patch

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index fb7aff14fd1a..99bf2b534ff8 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -206,7 +206,7 @@  struct kvm_smccc_features {
 	unsigned long vendor_hyp_bmap;
 };
 
-struct kvm_pinned_page {
+struct kvm_guest_page {
 	struct rb_node		node;
 	struct page		*page;
 	u64			ipa;
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index f796e092a921..ae6f65717178 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -336,7 +336,7 @@  static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 si
 
 static void pkvm_stage2_flush(struct kvm *kvm)
 {
-	struct kvm_pinned_page *ppage;
+	struct kvm_guest_page *ppage;
 	struct rb_node *node;
 
 	/*
@@ -346,7 +346,7 @@  static void pkvm_stage2_flush(struct kvm *kvm)
 	 * destroy (which only occurs when all vcpu are gone).
 	 */
 	for (node = rb_first(&kvm->arch.pkvm.pinned_pages); node; node = rb_next(node)) {
-		ppage = rb_entry(node, struct kvm_pinned_page, node);
+		ppage = rb_entry(node, struct kvm_guest_page, node);
 		__clean_dcache_guest_page(page_address(ppage->page), PAGE_SIZE);
 		cond_resched_rwlock_write(&kvm->mmu_lock);
 	}
@@ -1416,8 +1416,8 @@  static int pkvm_host_map_guest(u64 pfn, u64 gfn)
 
 static int cmp_ppages(struct rb_node *node, const struct rb_node *parent)
 {
-	struct kvm_pinned_page *a = container_of(node, struct kvm_pinned_page, node);
-	struct kvm_pinned_page *b = container_of(parent, struct kvm_pinned_page, node);
+	struct kvm_guest_page *a = container_of(node, struct kvm_guest_page, node);
+	struct kvm_guest_page *b = container_of(parent, struct kvm_guest_page, node);
 
 	if (a->ipa < b->ipa)
 		return -1;
@@ -1426,7 +1426,7 @@  static int cmp_ppages(struct rb_node *node, const struct rb_node *parent)
 	return 0;
 }
 
-static int insert_ppage(struct kvm *kvm, struct kvm_pinned_page *ppage)
+static int insert_ppage(struct kvm *kvm, struct kvm_guest_page *ppage)
 {
 	if (rb_find_add(&ppage->node, &kvm->arch.pkvm.pinned_pages, cmp_ppages))
 		return -EEXIST;
@@ -1440,7 +1440,7 @@  static int pkvm_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	struct kvm_hyp_memcache *hyp_memcache = &vcpu->arch.pkvm_memcache;
 	struct mm_struct *mm = current->mm;
 	unsigned int flags = FOLL_HWPOISON | FOLL_LONGTERM | FOLL_WRITE;
-	struct kvm_pinned_page *ppage;
+	struct kvm_guest_page *ppage;
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_s2_mmu *mmu =  &kvm->arch.mmu;
 	struct page *page;
diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c
index 713bbb023177..0dbde37d21d0 100644
--- a/arch/arm64/kvm/pkvm.c
+++ b/arch/arm64/kvm/pkvm.c
@@ -26,7 +26,7 @@  phys_addr_t hyp_mem_size;
 
 static int rb_ppage_cmp(const void *key, const struct rb_node *node)
 {
-	struct kvm_pinned_page *p = container_of(node, struct kvm_pinned_page, node);
+	struct kvm_guest_page *p = container_of(node, struct kvm_guest_page, node);
 	phys_addr_t ipa = (phys_addr_t)key;
 
 	return (ipa < p->ipa) ? -1 : (ipa > p->ipa);
@@ -254,7 +254,7 @@  static bool pkvm_teardown_vm(struct kvm *host_kvm)
 
 void pkvm_destroy_hyp_vm(struct kvm *host_kvm)
 {
-	struct kvm_pinned_page *ppage;
+	struct kvm_guest_page *ppage;
 	struct mm_struct *mm = current->mm;
 	struct rb_node *node;
 	unsigned long pages = 0;
@@ -266,7 +266,7 @@  void pkvm_destroy_hyp_vm(struct kvm *host_kvm)
 
 	node = rb_first(&host_kvm->arch.pkvm.pinned_pages);
 	while (node) {
-		ppage = rb_entry(node, struct kvm_pinned_page, node);
+		ppage = rb_entry(node, struct kvm_guest_page, node);
 		WARN_ON(kvm_call_hyp_nvhe(__pkvm_host_reclaim_page,
 					  page_to_pfn(ppage->page)));
 		cond_resched();
@@ -341,7 +341,7 @@  device_initcall_sync(finalize_pkvm);
 
 void pkvm_host_reclaim_page(struct kvm *host_kvm, phys_addr_t ipa)
 {
-	struct kvm_pinned_page *ppage;
+	struct kvm_guest_page *ppage;
 	struct mm_struct *mm = current->mm;
 	struct rb_node *node;
 
@@ -356,7 +356,7 @@  void pkvm_host_reclaim_page(struct kvm *host_kvm, phys_addr_t ipa)
 	if (!node)
 		return;
 
-	ppage = container_of(node, struct kvm_pinned_page, node);
+	ppage = container_of(node, struct kvm_guest_page, node);
 
 	WARN_ON(kvm_call_hyp_nvhe(__pkvm_host_reclaim_page,
 				  page_to_pfn(ppage->page)));
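
A side note on the two comparators the patch touches: insertion compares
two nodes (cmp_ppages() in mmu.c), while lookup compares a raw IPA key
against a node (rb_ppage_cmp() in pkvm.c), matching rb_find()'s key-based
signature. A sketch of the lookup-and-erase step that
pkvm_host_reclaim_page() performs before handing the page back;
untrack_guest_page() is an illustrative name, not part of the series:

/* Key-vs-node comparator used for lookup (cf. rb_ppage_cmp() in pkvm.c). */
static int guest_page_key_cmp(const void *key, const struct rb_node *node)
{
	struct kvm_guest_page *p = rb_entry(node, struct kvm_guest_page, node);
	phys_addr_t ipa = (phys_addr_t)key;

	return (ipa < p->ipa) ? -1 : (ipa > p->ipa);
}

/*
 * Illustrative helper: look up the tracking node for @ipa, detach it from
 * the tree, and return it so the caller can reclaim the page. With
 * guestmem there is no pin to drop, hence the rename.
 */
static struct kvm_guest_page *untrack_guest_page(struct rb_root *root,
						 phys_addr_t ipa)
{
	struct rb_node *node = rb_find((void *)ipa, root, guest_page_key_cmp);

	if (!node)
		return NULL;

	rb_erase(node, root);
	return rb_entry(node, struct kvm_guest_page, node);
}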