A secure guest will share some of its pages with the hypervisor
(e.g. virtio bounce buffers). Support shared pages in the HMM driver.

Signed-off-by: Bharata B Rao <bharata@linux.ibm.com>
---
 arch/powerpc/include/asm/hvcall.h | 3 ++
 arch/powerpc/kvm/book3s_hv_hmm.c | 58 +++++++++++++++++++++++++++++--
 2 files changed, 58 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -337,6 +337,9 @@
#define H_TLB_INVALIDATE 0xF808
#define H_COPY_TOFROM_GUEST 0xF80C

+/* Flags for H_SVM_PAGE_IN */
+#define H_PAGE_IN_SHARED 0x1
+
/* Platform-specific hcalls used by the Ultravisor */
#define H_SVM_PAGE_IN 0xEF00
#define H_SVM_PAGE_OUT 0xEF04
diff --git a/arch/powerpc/kvm/book3s_hv_hmm.c b/arch/powerpc/kvm/book3s_hv_hmm.c
--- a/arch/powerpc/kvm/book3s_hv_hmm.c
+++ b/arch/powerpc/kvm/book3s_hv_hmm.c
@@ -45,6 +45,7 @@ struct kvmppc_hmm_page_pvt {
unsigned long *rmap;
unsigned int lpid;
unsigned long gpa;
+ bool skip_page_out;
};

struct kvmppc_hmm_migrate_args {
@@ -212,6 +213,45 @@ static const struct migrate_vma_ops kvmppc_hmm_migrate_ops = {
.finalize_and_map = kvmppc_hmm_migrate_finalize_and_map,
};

+/*
+ * Share the page with the HV, converting it to a normal page.
+ *
+ * - If the page is already secure, provision a new normal page and share it.
+ * - If the page is already a normal page, share the existing page.
+ *
+ * In the former case, the fault taken by gfn_to_pfn() releases the HMM page.
+ */
+static unsigned long
+kvmppc_share_page(struct kvm *kvm, unsigned long *rmap, unsigned long gpa,
+ unsigned long addr, unsigned long page_shift)
+{
+
+ int ret;
+ unsigned int lpid = kvm->arch.lpid;
+ struct page *hmm_page;
+ struct kvmppc_hmm_page_pvt *pvt;
+ unsigned long pfn;
+ int srcu_idx;
+
+ if (kvmppc_is_hmm_pfn(*rmap)) {
+ hmm_page = pfn_to_page(*rmap & ~KVMPPC_PFN_HMM);
+ pvt = (struct kvmppc_hmm_page_pvt *)
+ hmm_devmem_page_get_drvdata(hmm_page);
+ pvt->skip_page_out = true;
+ }
+
+ srcu_idx = srcu_read_lock(&kvm->srcu);
+ pfn = gfn_to_pfn(kvm, gpa >> page_shift);
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
+ if (is_error_noslot_pfn(pfn))
+ return H_PARAMETER;
+
+ ret = uv_page_in(lpid, pfn << page_shift, gpa, 0, page_shift);
+ kvm_release_pfn_clean(pfn);
+
+ return (ret == U_SUCCESS) ? H_SUCCESS : H_PARAMETER;
+}
+
/*
* Move page from normal memory to secure memory.
*/
@@ -242,9 +282,12 @@ kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
end = addr + (1UL << page_shift);
- if (flags)
+ if (flags & ~H_PAGE_IN_SHARED)
return H_P2;

+ if (flags & H_PAGE_IN_SHARED)
+ return kvmppc_share_page(kvm, rmap, gpa, addr, page_shift);
+
args.rmap = rmap;
args.lpid = kvm->arch.lpid;
args.gpa = gpa;
@@ -291,8 +334,17 @@ kvmppc_hmm_fault_migrate_alloc_and_copy(struct vm_area_struct *vma,
pvt = (struct kvmppc_hmm_page_pvt *)
hmm_devmem_page_get_drvdata(spage);
pfn = page_to_pfn(dpage);
- ret = uv_page_out(pvt->lpid, pfn << PAGE_SHIFT,
- pvt->gpa, 0, PAGE_SHIFT);
+
+ /*
+ * This same alloc_and_copy() callback is used in two cases:
+ * - When the HV touches a secure page, in which case we page it out.
+ * - When a secure page is converted to a shared page, the page is
+ * touched (via gfn_to_pfn()) only to discard the HMM page, so we
+ * skip the page-out.
+ */
+ if (!pvt->skip_page_out)
+ ret = uv_page_out(pvt->lpid, pfn << PAGE_SHIFT,
+ pvt->gpa, 0, PAGE_SHIFT);
if (ret == U_SUCCESS)
*dst_pfn = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;
}
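For context, here is how a page typically ends up in kvmppc_share_page():
the secure guest shares a buffer (e.g. a virtio bounce buffer) with the
hypervisor through an Ultravisor call, and the Ultravisor then issues
H_SVM_PAGE_IN with the H_PAGE_IN_SHARED flag. The guest-side sketch below
is an illustration only: svm_share_buffer() is a hypothetical helper, and
uv_share_page() is assumed from the companion guest-side ultravisor
patches, not from this patch:

#include <linux/mm.h>
#include <linux/pfn.h>
#include <asm/ultravisor.h>	/* assumed to provide uv_share_page() */

/* Hypothetical guest-side helper: share npages starting at buf with the HV */
static int svm_share_buffer(void *buf, unsigned long npages)
{
	if (!PAGE_ALIGNED((unsigned long)buf))
		return -EINVAL;

	/* UV_SHARE_PAGE ucall; the UV in turn makes the H_SVM_PAGE_IN hcall */
	return uv_share_page(PHYS_PFN(__pa(buf)), npages);
}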
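The hcall ABI that kvmppc_h_svm_page_in() implements can be summarized by
the sketch below. The Ultravisor (firmware, not Linux) is the real caller;
plpar_hcall_norets() and uv_demo_page_in_shared() are used here purely to
illustrate the argument order the HV-side handler expects (gpa, flags,
page_shift):

#include <asm/hvcall.h>

/* Hypothetical illustration of H_SVM_PAGE_IN with the shared flag */
static long uv_demo_page_in_shared(unsigned long gpa, unsigned long page_shift)
{
	/* Any flag bit other than H_PAGE_IN_SHARED makes the HV return H_P2 */
	return plpar_hcall_norets(H_SVM_PAGE_IN, gpa,
				  H_PAGE_IN_SHARED, page_shift);
}

Rejecting unknown flag bits with H_P2, rather than silently ignoring them,
keeps the flags argument extensible: new flags can be added later without
ambiguity about how older hypervisors treated them.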