diff mbox series

[1/5] KVM: gmem: Don't rely on __kvm_gmem_get_pfn() for preparedness

Message ID 20241212063635.712877-2-michael.roth@amd.com (mailing list archive)
State New
Headers show
Series KVM: gmem: 2MB THP support and preparedness tracking changes | expand

Commit Message

Michael Roth Dec. 12, 2024, 6:36 a.m. UTC
Currently __kvm_gmem_get_pfn() sets 'is_prepared' so callers can skip
calling kvm_gmem_prepare_folio(). However, subsequent patches will
introduce some locking constraints around setting/checking preparedness
that will require filemap_invalidate_lock*() to be held while checking
for preparedness. This locking could theoretically be done inside
__kvm_gmem_get_pfn(), or by requiring that filemap_invalidate_lock*() is
held while calling __kvm_gmem_get_pfn(), but that places unnecessary
constraints around when __kvm_gmem_get_pfn() can be called, whereas
callers could just as easily call kvm_gmem_is_prepared() directly.

So, in preparation for these locking changes, drop the 'is_prepared'
argument, and leave it up to callers to handle checking preparedness
where needed and with the proper locking constraints.

Signed-off-by: Michael Roth <michael.roth@amd.com>
---
 virt/kvm/guest_memfd.c | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)
diff mbox series

Patch

diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index b69af3580bef..aa0038ddf4a4 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -773,7 +773,7 @@  void kvm_gmem_unbind(struct kvm_memory_slot *slot)
 static struct folio *__kvm_gmem_get_pfn(struct file *file,
 					struct kvm_memory_slot *slot,
 					pgoff_t index, kvm_pfn_t *pfn,
-					bool *is_prepared, int *max_order)
+					int *max_order)
 {
 	struct kvm_gmem *gmem = file->private_data;
 	struct folio *folio;
@@ -803,7 +803,6 @@  static struct folio *__kvm_gmem_get_pfn(struct file *file,
 	if (max_order)
 		*max_order = 0;
 
-	*is_prepared = kvm_gmem_is_prepared(file, index, folio);
 	return folio;
 }
 
@@ -814,19 +813,18 @@  int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
 	pgoff_t index = kvm_gmem_get_index(slot, gfn);
 	struct file *file = kvm_gmem_get_file(slot);
 	struct folio *folio;
-	bool is_prepared = false;
 	int r = 0;
 
 	if (!file)
 		return -EFAULT;
 
-	folio = __kvm_gmem_get_pfn(file, slot, index, pfn, &is_prepared, max_order);
+	folio = __kvm_gmem_get_pfn(file, slot, index, pfn, max_order);
 	if (IS_ERR(folio)) {
 		r = PTR_ERR(folio);
 		goto out;
 	}
 
-	if (!is_prepared)
+	if (!kvm_gmem_is_prepared(file, index, folio))
 		r = kvm_gmem_prepare_folio(kvm, file, slot, gfn, folio);
 
 	folio_unlock(folio);
@@ -872,7 +870,6 @@  long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
 		struct folio *folio;
 		gfn_t gfn = start_gfn + i;
 		pgoff_t index = kvm_gmem_get_index(slot, gfn);
-		bool is_prepared = false;
 		kvm_pfn_t pfn;
 
 		if (signal_pending(current)) {
@@ -880,13 +877,13 @@  long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
 			break;
 		}
 
-		folio = __kvm_gmem_get_pfn(file, slot, index, &pfn, &is_prepared, &max_order);
+		folio = __kvm_gmem_get_pfn(file, slot, index, &pfn, &max_order);
 		if (IS_ERR(folio)) {
 			ret = PTR_ERR(folio);
 			break;
 		}
 
-		if (is_prepared) {
+		if (kvm_gmem_is_prepared(file, index, folio)) {
 			folio_unlock(folio);
 			folio_put(folio);
 			ret = -EEXIST;