--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -866,6 +866,8 @@ static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
extern int kvmppc_hmm_init(void);
extern void kvmppc_hmm_free(void);
extern void kvmppc_hmm_release_pfns(struct kvm_memory_slot *free);
+extern bool kvmppc_is_guest_secure(struct kvm *kvm);
+extern int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gpa);
#else
static inline int kvmppc_hmm_init(void)
{
@@ -874,6 +876,17 @@ static inline int kvmppc_hmm_init(void)
static inline void kvmppc_hmm_free(void) {}
static inline void kvmppc_hmm_release_pfns(struct kvm_memory_slot *free) {}
+
+
+static inline bool kvmppc_is_guest_secure(struct kvm *kvm)
+{
+ return false;
+}
+
+static inline int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gpa)
+{
+ return -EFAULT;
+}
#endif /* CONFIG_PPC_UV */
#endif /* __POWERPC_KVM_HOST_H__ */
--- a/arch/powerpc/include/asm/ultravisor-api.h
+++ b/arch/powerpc/include/asm/ultravisor-api.h
@@ -24,5 +24,6 @@
#define UV_UNREGISTER_MEM_SLOT 0xF124
#define UV_PAGE_IN 0xF128
#define UV_PAGE_OUT 0xF12C
+#define UV_PAGE_INVAL 0xF138
#endif /* _ASM_POWERPC_ULTRAVISOR_API_H */
--- a/arch/powerpc/include/asm/ultravisor.h
+++ b/arch/powerpc/include/asm/ultravisor.h
@@ -77,6 +77,13 @@ static inline int uv_unregister_mem_slot(u64 lpid, u64 slotid)
return ucall(UV_UNREGISTER_MEM_SLOT, retbuf, lpid, slotid);
}
+
+static inline int uv_page_inval(u64 lpid, u64 gpa, u64 page_shift)
+{
+ unsigned long retbuf[UCALL_BUFSIZE];
+
+ return ucall(UV_PAGE_INVAL, retbuf, lpid, gpa, page_shift);
+}
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_POWERPC_ULTRAVISOR_H */
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -21,6 +21,8 @@
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>
+#include <asm/ultravisor.h>
+#include <asm/kvm_host.h>
/*
* Supported radix tree geometry.
@@ -923,6 +925,9 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (!(dsisr & DSISR_PRTABLE_FAULT))
gpa |= ea & 0xfff;
+ if (kvmppc_is_guest_secure(kvm))
+ return kvmppc_send_page_to_uv(kvm, gpa & PAGE_MASK);
+
/* Get the corresponding memslot */
memslot = gfn_to_memslot(kvm, gfn);
@@ -980,6 +985,11 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long gpa = gfn << PAGE_SHIFT;
unsigned int shift;
+ if (kvmppc_is_guest_secure(kvm)) {
+ uv_page_inval(kvm->arch.lpid, gpa, PAGE_SHIFT);
+ return 0;
+ }
+
ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
if (ptep && pte_present(*ptep))
kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
@@ -997,6 +1007,9 @@ int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
int ref = 0;
unsigned long old, *rmapp;
+ if (kvmppc_is_guest_secure(kvm))
+ return ref;
+
ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
@@ -1021,6 +1034,9 @@ int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned int shift;
int ref = 0;
+ if (kvmppc_is_guest_secure(kvm))
+ return ref;
+
ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
if (ptep && pte_present(*ptep) && pte_young(*ptep))
ref = 1;
@@ -1038,6 +1054,9 @@ static int kvm_radix_test_clear_dirty(struct kvm *kvm,
int ret = 0;
unsigned long old, *rmapp;
+ if (kvmppc_is_guest_secure(kvm))
+ return ret;
+
ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
ret = 1;
--- a/arch/powerpc/kvm/book3s_hv_hmm.c
+++ b/arch/powerpc/kvm/book3s_hv_hmm.c
@@ -55,6 +55,11 @@ struct kvmppc_hmm_migrate_args {
unsigned long page_shift;
};
+bool kvmppc_is_guest_secure(struct kvm *kvm)
+{
+ return !!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE);
+}
+
unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
{
struct kvm_memslots *slots;
@@ -478,6 +483,24 @@ kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa,
return ret;
}
+int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gpa)
+{
+ int srcu_idx;
+ unsigned long pfn;
+ int ret;
+
+ srcu_idx = srcu_read_lock(&kvm->srcu);
+ pfn = gfn_to_pfn(kvm, gpa >> PAGE_SHIFT);
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
+ if (is_error_noslot_pfn(pfn))
+ return -EFAULT;
+
+ ret = uv_page_in(kvm->arch.lpid, pfn << PAGE_SHIFT, gpa, 0, PAGE_SHIFT);
+ kvm_release_pfn_clean(pfn);
+
+ return (ret == U_SUCCESS) ? RESUME_GUEST : -EFAULT;
+}
+
static u64 kvmppc_get_secmem_size(void)
{
struct device_node *np;
- After the guest becomes secure, when we handle a page fault of a page
  belonging to SVM in HV, send that page to UV via UV_PAGE_IN.

- Whenever a page is unmapped on the HV side, inform UV via UV_PAGE_INVAL.

Signed-off-by: Bharata B Rao <bharata@linux.ibm.com>
---
 arch/powerpc/include/asm/kvm_host.h       | 13 +++++++++++++
 arch/powerpc/include/asm/ultravisor-api.h |  1 +
 arch/powerpc/include/asm/ultravisor.h     |  7 +++++++
 arch/powerpc/kvm/book3s_64_mmu_radix.c    | 19 +++++++++++++++++++
 arch/powerpc/kvm/book3s_hv_hmm.c          | 23 +++++++++++++++++++++++
 5 files changed, 63 insertions(+)
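Taken together, the two changes above give the hypervisor the following flow for
a secure guest. The snippet below is an illustrative sketch, not part of the
patch: it condenses kvmppc_send_page_to_uv() and the early-return branch added
to kvm_unmap_radix() into two standalone helpers; secure_fault_sketch() and
secure_unmap_sketch() are made-up names, and the ucall wrappers and KVM APIs
are assumed to behave as defined elsewhere in this series.

/*
 * Illustrative sketch only -- a condensed view of the secure-guest paths
 * added by this patch; the helper names here are hypothetical.
 */
#include <linux/kvm_host.h>
#include <asm/kvm_ppc.h>	/* RESUME_GUEST */
#include <asm/ultravisor.h>	/* uv_page_in(), uv_page_inval(), U_SUCCESS */

/*
 * Radix page fault for a secure guest: resolve the backing pfn and hand
 * the page to the Ultravisor instead of inserting a PTE into the HV's
 * partition-scoped page table.
 */
static int secure_fault_sketch(struct kvm *kvm, unsigned long gpa)
{
	unsigned long pfn;
	int srcu_idx, ret;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	pfn = gfn_to_pfn(kvm, gpa >> PAGE_SHIFT);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	if (is_error_noslot_pfn(pfn))
		return -EFAULT;

	ret = uv_page_in(kvm->arch.lpid, pfn << PAGE_SHIFT, gpa, 0, PAGE_SHIFT);
	kvm_release_pfn_clean(pfn);

	return (ret == U_SUCCESS) ? RESUME_GUEST : -EFAULT;
}

/*
 * Unmap on the HV side: there is no HV PTE to tear down for a secure
 * guest, so only the Ultravisor needs to be told that the GPA is gone.
 */
static void secure_unmap_sketch(struct kvm *kvm, unsigned long gpa)
{
	uv_page_inval(kvm->arch.lpid, gpa, PAGE_SHIFT);
}

In the patch itself, the first path is kvmppc_send_page_to_uv() called from
kvmppc_book3s_radix_page_fault(), and the second is the early UV_PAGE_INVAL
branch added to kvm_unmap_radix().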