[v7,2/7] kvmppc: Shared pages support for secure guests

Message ID 20190822102620.21897-3-bharata@linux.ibm.com (mailing list archive)
State New, archived
Series KVMPPC driver to manage secure guest pages

Commit Message

Bharata B Rao Aug. 22, 2019, 10:26 a.m. UTC
A secure guest will share some of its pages with the hypervisor (e.g.
virtio bounce buffers). Support sharing of pages between the hypervisor
and the ultravisor.

Once a secure page is converted to a shared page, the device page is
unmapped from the HV side page tables.

Signed-off-by: Bharata B Rao <bharata@linux.ibm.com>
---
 arch/powerpc/include/asm/hvcall.h |  3 ++
 arch/powerpc/kvm/book3s_hv_devm.c | 70 +++++++++++++++++++++++++++++--
 2 files changed, 69 insertions(+), 4 deletions(-)
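
For context, a minimal guest-side sketch of how this path gets exercised.
It assumes the uv_share_page() ucall wrapper and is_secure_guest() helper
from the companion ultravisor patches (the names are illustrative and not
part of this patch): the ultravisor responds to the ucall by issuing
H_SVM_PAGE_IN with the H_PAGE_IN_SHARED flag for the page's GPA, which
lands in kvmppc_share_page() below.

	/*
	 * Guest-side sketch: share a buffer with the hypervisor.
	 * uv_share_page() and is_secure_guest() are assumed from the
	 * ultravisor ucall API; they are not defined in this patch.
	 */
	#include <asm/ultravisor.h>
	#include <asm/svm.h>

	static int share_buffer_with_hv(void *buf, int npages)
	{
		/* Normal guests need no conversion; HV already maps the page */
		if (!is_secure_guest())
			return 0;

		/*
		 * UV_SHARE_PAGE ucall: the UV in turn makes the
		 * H_SVM_PAGE_IN (H_PAGE_IN_SHARED) hcall to the HV
		 * for each page in the range.
		 */
		return uv_share_page(PHYS_PFN(__pa(buf)), npages);
	}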

Comments

Sukadev Bhattiprolu Aug. 29, 2019, 3:04 a.m. UTC | #1
> A secure guest will share some of its pages with the hypervisor (e.g.
> virtio bounce buffers). Support sharing of pages between the hypervisor
> and the ultravisor.
> 
> Once a secure page is converted to a shared page, the device page is
> unmapped from the HV side page tables.
> 
> Signed-off-by: Bharata B Rao <bharata@linux.ibm.com>
> ---
>  arch/powerpc/include/asm/hvcall.h |  3 ++
>  arch/powerpc/kvm/book3s_hv_devm.c | 70 +++++++++++++++++++++++++++++--
>  2 files changed, 69 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
> index 2f6b952deb0f..05b8536f6653 100644
> --- a/arch/powerpc/include/asm/hvcall.h
> +++ b/arch/powerpc/include/asm/hvcall.h
> @@ -337,6 +337,9 @@
>  #define H_TLB_INVALIDATE	0xF808
>  #define H_COPY_TOFROM_GUEST	0xF80C
> 
> +/* Flags for H_SVM_PAGE_IN */
> +#define H_PAGE_IN_SHARED        0x1
> +
>  /* Platform-specific hcalls used by the Ultravisor */
>  #define H_SVM_PAGE_IN		0xEF00
>  #define H_SVM_PAGE_OUT		0xEF04
> diff --git a/arch/powerpc/kvm/book3s_hv_devm.c b/arch/powerpc/kvm/book3s_hv_devm.c
> index 13722f27fa7d..6a3229b78fed 100644
> --- a/arch/powerpc/kvm/book3s_hv_devm.c
> +++ b/arch/powerpc/kvm/book3s_hv_devm.c
> @@ -46,6 +46,7 @@ struct kvmppc_devm_page_pvt {
>  	unsigned long *rmap;
>  	unsigned int lpid;
>  	unsigned long gpa;
> +	bool skip_page_out;
>  };
> 
>  /*
> @@ -139,6 +140,54 @@ kvmppc_devm_migrate_alloc_and_copy(struct migrate_vma *mig,
>  	return 0;
>  }
> 
> +/*
> + * Shares the page with HV, thus making it a normal page.
> + *
> + * - If the page is already secure, then provision a new page and share
> + * - If the page is a normal page, share the existing page
> + *
> + * In the former case, uses the dev_pagemap_ops migrate_to_ram handler
> + * to unmap the device page from QEMU's page tables.
> + */
> +static unsigned long
> +kvmppc_share_page(struct kvm *kvm, unsigned long gpa, unsigned long page_shift)
> +{
> +
> +	int ret = H_PARAMETER;
> +	struct page *devm_page;
> +	struct kvmppc_devm_page_pvt *pvt;
> +	unsigned long pfn;
> +	unsigned long *rmap;
> +	struct kvm_memory_slot *slot;
> +	unsigned long gfn = gpa >> page_shift;
> +	int srcu_idx;
> +
> +	srcu_idx = srcu_read_lock(&kvm->srcu);
> +	slot = gfn_to_memslot(kvm, gfn);
> +	if (!slot)
> +		goto out;
> +
> +	rmap = &slot->arch.rmap[gfn - slot->base_gfn];
> +	if (kvmppc_rmap_is_devm_pfn(*rmap)) {
> +		devm_page = pfn_to_page(*rmap & ~KVMPPC_RMAP_DEVM_PFN);
> +		pvt = (struct kvmppc_devm_page_pvt *)
> +			devm_page->zone_device_data;
> +		pvt->skip_page_out = true;
> +	}
> +
> +	pfn = gfn_to_pfn(kvm, gpa >> page_shift);

Use 'gfn'?

> +	if (is_error_noslot_pfn(pfn))
> +		goto out;
> +
> +	ret = uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0, page_shift);
> +	if (ret == U_SUCCESS)
> +		ret = H_SUCCESS;
> +	kvm_release_pfn_clean(pfn);

Nit: Blank line?
> +out:
> +	srcu_read_unlock(&kvm->srcu, srcu_idx);
> +	return ret;
> +}
> +
>  /*
>   * Move page from normal memory to secure memory.
>   */
> @@ -159,9 +208,12 @@ kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
>  	if (page_shift != PAGE_SHIFT)
>  		return H_P3;
> 
> -	if (flags)
> +	if (flags & ~H_PAGE_IN_SHARED)
>  		return H_P2;
> 
> +	if (flags & H_PAGE_IN_SHARED)
> +		return kvmppc_share_page(kvm, gpa, page_shift);
> +
>  	ret = H_PARAMETER;
>  	down_read(&kvm->mm->mmap_sem);
>  	srcu_idx = srcu_read_lock(&kvm->srcu);
> @@ -211,7 +263,7 @@ kvmppc_devm_fault_migrate_alloc_and_copy(struct migrate_vma *mig,
>  	struct page *dpage, *spage;
>  	struct kvmppc_devm_page_pvt *pvt;
>  	unsigned long pfn;
> -	int ret;
> +	int ret = U_SUCCESS;
> 
>  	spage = migrate_pfn_to_page(*mig->src);
>  	if (!spage || !(*mig->src & MIGRATE_PFN_MIGRATE))
> @@ -226,8 +278,18 @@ kvmppc_devm_fault_migrate_alloc_and_copy(struct migrate_vma *mig,
>  	pvt = spage->zone_device_data;
> 
>  	pfn = page_to_pfn(dpage);
> -	ret = uv_page_out(pvt->lpid, pfn << page_shift, pvt->gpa, 0,
> -			  page_shift);
> +
> +	/*
> +	 * This same function is used in two cases:

Nit: s/same//

> +	 * - When HV touches a secure page, for which we do page-out

Better to qualify page-out with "uv page-out"? It's kind of counterintuitive
to do a page-out on a fault!

> +	 * - When a secure page is converted to shared page, we touch
> +	 *   the page to essentially unmap the device page. In this
> +	 *   case we skip page-out.
> +	 */
> +	if (!pvt->skip_page_out)
> +		ret = uv_page_out(pvt->lpid, pfn << page_shift, pvt->gpa, 0,
> +				  page_shift);
> +
>  	if (ret == U_SUCCESS)
>  		*mig->dst = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;
>  	else {
> -- 
> 2.21.0
Bharata B Rao Aug. 29, 2019, 6:58 a.m. UTC | #2
On Wed, Aug 28, 2019 at 08:04:43PM -0700, Sukadev Bhattiprolu wrote:
> > +	pfn = gfn_to_pfn(kvm, gpa >> page_shift);
> 
> Use 'gfn'?

Yes.

> > +	/*
> > +	 * This same function is used in two cases:
> 
> Nit: s/same//

Extra emphasis :)

> 
> > +	 * - When HV touches a secure page, for which we do page-out
> 
> Better to qualify page-out with "uv page-out"? It's kind of counterintuitive
> to do a page-out on a fault!

Sure.

Regards,
Bharata.
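
Taken together, the tweaks agreed above would amount to something like
the following in a respin (a sketch only; no v8 appears in this thread):

	/* Use the precomputed gfn instead of re-deriving it from gpa */
	pfn = gfn_to_pfn(kvm, gfn);

	/*
	 * This function is used in two cases:
	 * - When HV touches a secure page, for which we do a UV page-out
	 * - When a secure page is converted to shared page, we touch
	 *   the page to essentially unmap the device page. In this
	 *   case we skip page-out.
	 */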

Patch

diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 2f6b952deb0f..05b8536f6653 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -337,6 +337,9 @@
 #define H_TLB_INVALIDATE	0xF808
 #define H_COPY_TOFROM_GUEST	0xF80C
 
+/* Flags for H_SVM_PAGE_IN */
+#define H_PAGE_IN_SHARED        0x1
+
 /* Platform-specific hcalls used by the Ultravisor */
 #define H_SVM_PAGE_IN		0xEF00
 #define H_SVM_PAGE_OUT		0xEF04
diff --git a/arch/powerpc/kvm/book3s_hv_devm.c b/arch/powerpc/kvm/book3s_hv_devm.c
index 13722f27fa7d..6a3229b78fed 100644
--- a/arch/powerpc/kvm/book3s_hv_devm.c
+++ b/arch/powerpc/kvm/book3s_hv_devm.c
@@ -46,6 +46,7 @@ struct kvmppc_devm_page_pvt {
 	unsigned long *rmap;
 	unsigned int lpid;
 	unsigned long gpa;
+	bool skip_page_out;
 };
 
 /*
@@ -139,6 +140,54 @@ kvmppc_devm_migrate_alloc_and_copy(struct migrate_vma *mig,
 	return 0;
 }
 
+/*
+ * Shares the page with HV, thus making it a normal page.
+ *
+ * - If the page is already secure, then provision a new page and share
+ * - If the page is a normal page, share the existing page
+ *
+ * In the former case, uses the dev_pagemap_ops migrate_to_ram handler
+ * to unmap the device page from QEMU's page tables.
+ */
+static unsigned long
+kvmppc_share_page(struct kvm *kvm, unsigned long gpa, unsigned long page_shift)
+{
+
+	int ret = H_PARAMETER;
+	struct page *devm_page;
+	struct kvmppc_devm_page_pvt *pvt;
+	unsigned long pfn;
+	unsigned long *rmap;
+	struct kvm_memory_slot *slot;
+	unsigned long gfn = gpa >> page_shift;
+	int srcu_idx;
+
+	srcu_idx = srcu_read_lock(&kvm->srcu);
+	slot = gfn_to_memslot(kvm, gfn);
+	if (!slot)
+		goto out;
+
+	rmap = &slot->arch.rmap[gfn - slot->base_gfn];
+	if (kvmppc_rmap_is_devm_pfn(*rmap)) {
+		devm_page = pfn_to_page(*rmap & ~KVMPPC_RMAP_DEVM_PFN);
+		pvt = (struct kvmppc_devm_page_pvt *)
+			devm_page->zone_device_data;
+		pvt->skip_page_out = true;
+	}
+
+	pfn = gfn_to_pfn(kvm, gpa >> page_shift);
+	if (is_error_noslot_pfn(pfn))
+		goto out;
+
+	ret = uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0, page_shift);
+	if (ret == U_SUCCESS)
+		ret = H_SUCCESS;
+	kvm_release_pfn_clean(pfn);
+out:
+	srcu_read_unlock(&kvm->srcu, srcu_idx);
+	return ret;
+}
+
 /*
  * Move page from normal memory to secure memory.
  */
@@ -159,9 +208,12 @@ kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
 	if (page_shift != PAGE_SHIFT)
 		return H_P3;
 
-	if (flags)
+	if (flags & ~H_PAGE_IN_SHARED)
 		return H_P2;
 
+	if (flags & H_PAGE_IN_SHARED)
+		return kvmppc_share_page(kvm, gpa, page_shift);
+
 	ret = H_PARAMETER;
 	down_read(&kvm->mm->mmap_sem);
 	srcu_idx = srcu_read_lock(&kvm->srcu);
@@ -211,7 +263,7 @@ kvmppc_devm_fault_migrate_alloc_and_copy(struct migrate_vma *mig,
 	struct page *dpage, *spage;
 	struct kvmppc_devm_page_pvt *pvt;
 	unsigned long pfn;
-	int ret;
+	int ret = U_SUCCESS;
 
 	spage = migrate_pfn_to_page(*mig->src);
 	if (!spage || !(*mig->src & MIGRATE_PFN_MIGRATE))
@@ -226,8 +278,18 @@ kvmppc_devm_fault_migrate_alloc_and_copy(struct migrate_vma *mig,
 	pvt = spage->zone_device_data;
 
 	pfn = page_to_pfn(dpage);
-	ret = uv_page_out(pvt->lpid, pfn << page_shift, pvt->gpa, 0,
-			  page_shift);
+
+	/*
+	 * This same function is used in two cases:
+	 * - When HV touches a secure page, for which we do page-out
+	 * - When a secure page is converted to shared page, we touch
+	 *   the page to essentially unmap the device page. In this
+	 *   case we skip page-out.
+	 */
+	if (!pvt->skip_page_out)
+		ret = uv_page_out(pvt->lpid, pfn << page_shift, pvt->gpa, 0,
+				  page_shift);
+
 	if (ret == U_SUCCESS)
 		*mig->dst = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;
 	else {
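
For reference, a sketch of how the HV side dispatches this hcall; the
actual wiring belongs to another patch in this series, so treat the
snippet below as illustrative only (argument registers follow the hcall
ABI: r4 = gpa, r5 = flags, r6 = page_shift):

	/* In kvmppc_pseries_do_hcall(), arch/powerpc/kvm/book3s_hv.c */
	case H_SVM_PAGE_IN:
		ret = kvmppc_h_svm_page_in(vcpu->kvm,
					   kvmppc_get_gpr(vcpu, 4),
					   kvmppc_get_gpr(vcpu, 5),
					   kvmppc_get_gpr(vcpu, 6));
		break;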