@@ -1438,6 +1438,70 @@ static int insert_ppage(struct kvm *kvm, struct kvm_guest_page *ppage)
return 0;
}
+static int guestmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+ struct kvm_memory_slot *memslot)
+{
+ struct kvm_hyp_memcache *hyp_memcache = &vcpu->arch.pkvm_memcache;
+ struct kvm_guest_page *guest_page;
+ struct mm_struct *mm = current->mm;
+ gfn_t gfn = gpa_to_gfn(fault_ipa);
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_s2_mmu *mmu = &kvm->arch.mmu;
+ struct page *page = NULL;
+ kvm_pfn_t pfn;
+ int ret;
+
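+ /* Ensure enough pages are preallocated for the stage-2 tables below. */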
+ ret = topup_hyp_memcache(hyp_memcache, kvm_mmu_cache_min_pages(mmu));
+ if (ret)
+ return ret;
+
+ /*
+ * Acquire the page lock to avoid racing with kvm_gmem_fault() when
+ * checking the page_mapcount later on.
+ */
+ ret = kvm_gmem_get_pfn_locked(kvm, memslot, gfn, &pfn, NULL);
+ if (ret)
+ return ret;
+
+ page = pfn_to_page(pfn);
+
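+ /*
+ * A page the host may not map must have no live host userspace
+ * mappings; if it does, refuse to map it into the guest.
+ */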
+ if (!kvm_gmem_is_mappable(kvm, gfn) && page_mapcount(page)) {
+ ret = -EPERM;
+ goto rel_page;
+ }
+
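+ /* Allocate a descriptor recording the page that backs this IPA. */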
+ guest_page = kmalloc(sizeof(*guest_page), GFP_KERNEL_ACCOUNT);
+ if (!guest_page) {
+ ret = -ENOMEM;
+ goto rel_page;
+ }
+
+ guest_page->page = page;
+ guest_page->ipa = fault_ipa;
+ guest_page->is_pinned = false;
+
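+ /* Charge the page against the task's locked memory limit. */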
+ ret = account_locked_vm(mm, 1, true);
+ if (ret)
+ goto free_gp;
+
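+ /*
+ * Map the page into the guest stage-2 and, only on success, record
+ * the descriptor in the VM's guest-page tracking.
+ */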
+ write_lock(&kvm->mmu_lock);
+ ret = pkvm_host_map_guest(pfn, gfn);
+ if (!ret)
+ WARN_ON(insert_ppage(kvm, guest_page));
+ write_unlock(&kvm->mmu_lock);
+
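+ /* On failure, unwind the locked-memory charge and free the descriptor. */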
+ if (ret)
+ account_locked_vm(mm, 1, false);
+free_gp:
+ if (ret)
+ kfree(guest_page);
+rel_page:
+ unlock_page(page);
+ put_page(page);
+
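+ /*
+ * -EAGAIN indicates a transient race (e.g. with another vCPU mapping
+ * the same page); return 0 so the vCPU replays the access.
+ */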
+ return ret != -EAGAIN ? ret : 0;
+}
+
static int pkvm_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_memory_slot *memslot)
{
@@ -1887,11 +1951,16 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
goto out_unlock;
}
- if (is_protected_kvm_enabled())
- ret = pkvm_mem_abort(vcpu, fault_ipa, memslot);
- else
+ if (is_protected_kvm_enabled()) {
+ if (kvm_slot_can_be_private(memslot))
+ ret = guestmem_abort(vcpu, fault_ipa, memslot);
+ else
+ ret = pkvm_mem_abort(vcpu, fault_ipa, memslot);
+ } else {
ret = user_mem_abort(vcpu, fault_ipa, memslot,
esr_fsc_is_permission_fault(esr));
+ }
+
if (ret == 0)
ret = 1;
Introduce a new fault handler which responds to guest faults for guestmem pages. Signed-off-by: Fuad Tabba <tabba@google.com> --- arch/arm64/kvm/mmu.c | 75 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 72 insertions(+), 3 deletions(-)