[RFC,v8,09/56] KVM: SEV: Handle memory backed by restricted memfd

Message ID: 20230220183847.59159-10-michael.roth@amd.com (mailing list archive)
State: Not Applicable
Delegated to: Herbert Xu
Series: Add AMD Secure Nested Paging (SEV-SNP) Hypervisor Support

Commit Message

Michael Roth Feb. 20, 2023, 6:38 p.m. UTC
From: Nikunj A Dadhania <nikunj@amd.com>

Do not pin the guest memory backed by a restrictedmem backend, as
pages in the restrictedmem are already pinned. Instead, populate the
pages array for these guests using the already-pinned pages provided by
restrictedmem backend.

Signed-off-by: Nikunj A Dadhania <nikunj@amd.com>
Signed-off-by: Michael Roth <michael.roth@amd.com>
---
 arch/x86/kvm/svm/sev.c | 68 +++++++++++++++++++++++++++++++++++-------
 1 file changed, 58 insertions(+), 10 deletions(-)

Comments

Vlastimil Babka March 3, 2023, 2:05 p.m. UTC | #1
On 2/20/23 19:38, Michael Roth wrote:
> From: Nikunj A Dadhania <nikunj@amd.com>
> 
> Do not pin the guest memory backed by a restrictedmem backend, as
> pages in the restrictedmem are already pinned. Instead, populate the
> pages array for these guests using the already-pinned pages provided by

IIUC the "already pinned" became "effectively unmovable and unevictable"
since the earlier versions, so that would be more accurate now?

> +static int sev_private_mem_get_pages(struct kvm *kvm, unsigned long addr,
> +				     unsigned long size, unsigned long npages,
> +				     struct page **pages)
> +{
> +	return kvm_vm_do_hva_range_op(kvm, addr, addr + size,
> +				      sev_private_mem_get_pages_handler, pages);
> +}
> +
>  /*
>   * Legacy SEV guest pin the pages and return the array populated with pinned
>   * pages.
> + *
> + * For SEV guests backed by a restricted memfd, the pages are already marked
> + * unmovable and unevictable. Populate the pages array for these guests using
> + * the restrictedmem get_pfn.

Right.
Nikunj A. Dadhania March 6, 2023, 11:03 a.m. UTC | #2
On 03/03/23 19:35, Vlastimil Babka wrote:
> On 2/20/23 19:38, Michael Roth wrote:
>> From: Nikunj A Dadhania <nikunj@amd.com>
>>
>> Do not pin the guest memory backed by a restrictedmem backend, as
>> pages in the restrictedmem are already pinned. Instead, populate the
>> pages array for these guests using the already-pinned pages provided by
> 
> IIUC the "already pinned" became "effectively unmovable and unevictable"
> since the earlier versions, so that would be more accurate now?

Yes, that makes sense.

Regards
Nikunj

Patch

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 523c78bbff3f..ad9b29ff4590 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -383,9 +383,46 @@  static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
 	return ret;
 }
 
+static int sev_private_mem_get_pages_handler(struct kvm *kvm, struct kvm_gfn_range *range,
+					     void *data)
+{
+	struct kvm_memory_slot *memslot = range->slot;
+	struct page **pages = data;
+	int ret = 0, i = 0;
+	kvm_pfn_t pfn;
+	gfn_t gfn;
+
+	for (gfn = range->start; gfn < range->end; gfn++) {
+		int order;
+
+		ret = kvm_restrictedmem_get_pfn(memslot, gfn, &pfn, &order);
+		if (ret)
+			return ret;
+
+		if (is_error_noslot_pfn(pfn))
+			return -EFAULT;
+
+		pages[i++] = pfn_to_page(pfn);
+	}
+
+	return ret;
+}
+
+static int sev_private_mem_get_pages(struct kvm *kvm, unsigned long addr,
+				     unsigned long size, unsigned long npages,
+				     struct page **pages)
+{
+	return kvm_vm_do_hva_range_op(kvm, addr, addr + size,
+				      sev_private_mem_get_pages_handler, pages);
+}
+
 /*
  * Legacy SEV guest pin the pages and return the array populated with pinned
  * pages.
+ *
+ * For SEV guests backed by a restricted memfd, the pages are already marked
+ * unmovable and unevictable. Populate the pages array for these guests using
+ * the restrictedmem get_pfn.
  */
 static struct page **sev_memory_get_pages(struct kvm *kvm, unsigned long uaddr,
 					  unsigned long ulen, unsigned long *n,
@@ -393,7 +430,7 @@  static struct page **sev_memory_get_pages(struct kvm *kvm, unsigned long uaddr,
 {
 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
 	unsigned long npages, size;
-	int npinned;
+	int npinned = 0;
 	unsigned long locked, lock_limit;
 	struct page **pages;
 	unsigned long first, last;
@@ -429,16 +466,25 @@  static struct page **sev_memory_get_pages(struct kvm *kvm, unsigned long uaddr,
 	if (!pages)
 		return ERR_PTR(-ENOMEM);
 
-	/* Pin the user virtual address. */
-	npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
-	if (npinned != npages) {
-		pr_err("SEV: Failure locking %lu pages.\n", npages);
-		ret = -ENOMEM;
-		goto err;
+	if (kvm_arch_has_private_mem(kvm)) {
+		/* Get the PFN from memfile */
+		if (sev_private_mem_get_pages(kvm, uaddr, ulen, npages, pages)) {
+			pr_err("%s: ERROR: unable to find slot for uaddr %lx\n", __func__, uaddr);
+			ret = -ENOMEM;
+			goto err;
+		}
+	} else {
+		/* Pin the user virtual address. */
+		npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
+		if (npinned != npages) {
+			pr_err("SEV: Failure locking %lu pages.\n", npages);
+			ret = -ENOMEM;
+			goto err;
+		}
+		sev->pages_locked = locked;
 	}
 
 	*n = npages;
-	sev->pages_locked = locked;
 
 	return pages;
 
@@ -455,9 +501,11 @@  static void sev_memory_put_pages(struct kvm *kvm, struct page **pages,
 {
 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
 
-	unpin_user_pages(pages, npages);
+	if (!kvm_arch_has_private_mem(kvm)) {
+		unpin_user_pages(pages, npages);
+		sev->pages_locked -= npages;
+	}
 	kvfree(pages);
-	sev->pages_locked -= npages;
 }
 
 static void sev_clflush_pages(struct page *pages[], unsigned long npages)
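
For orientation, below is a minimal caller-side sketch of how these helpers fit together, modeled on the existing SEV launch-update flow. It is illustrative only: sev_example_update_data is a made-up name, and the trailing write flag to sev_memory_get_pages() and the npages argument to sev_memory_put_pages() are assumed from the mainline sev_pin_memory()/sev_unpin_memory() prototypes they derive from, since the full signatures are not visible in the hunks above.

/* Illustrative sketch only -- not part of the patch; lives in sev.c context. */
static int sev_example_update_data(struct kvm *kvm, unsigned long uaddr,
				   unsigned long size)
{
	unsigned long npages;
	struct page **pages;

	/*
	 * For guests with private (restrictedmem) memory this resolves the
	 * already-present pages via the memfd; for legacy SEV guests it pins
	 * the user pages and charges them against the locked-memory limit.
	 */
	pages = sev_memory_get_pages(kvm, uaddr, size, &npages, 1);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* Flush CPU caches before handing the pages to the SEV firmware. */
	sev_clflush_pages(pages, npages);

	/* ... issue the SEV firmware command against 'pages' here ... */

	/* Unpins and un-accounts only in the legacy (non-private) case. */
	sev_memory_put_pages(kvm, pages, npages);
	return 0;
}

The point of the pairing is that the caller stays backend-agnostic: the kvm_arch_has_private_mem() check in the get/put helpers is the only place that distinguishes restrictedmem-backed guests from pinned user memory.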