--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -64,6 +64,7 @@
 #define __KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector 18
 #define __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize 19
 #define __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc 20
+#define __KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_hyp 21
 #ifndef __ASSEMBLY__
--- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
@@ -55,6 +55,7 @@ extern const u8 pkvm_hyp_id;
 int __pkvm_prot_finalize(void);
 int __pkvm_host_share_hyp(u64 pfn, u64 nr_pages);
+int __pkvm_host_unshare_hyp(u64 pfn, u64 nr_pages);
 bool addr_is_memory(phys_addr_t phys);
 int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot);
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -148,6 +148,14 @@ static void handle___pkvm_host_share_hyp(struct kvm_cpu_context *host_ctxt)
 	cpu_reg(host_ctxt, 1) = __pkvm_host_share_hyp(pfn, nr_pages);
 }
+static void handle___pkvm_host_unshare_hyp(struct kvm_cpu_context *host_ctxt)
+{
+	DECLARE_REG(u64, pfn, host_ctxt, 1);
+	DECLARE_REG(u64, nr_pages, host_ctxt, 2);
+
+	cpu_reg(host_ctxt, 1) = __pkvm_host_unshare_hyp(pfn, nr_pages);
+}
+
 static void handle___pkvm_create_private_mapping(struct kvm_cpu_context *host_ctxt)
 {
 	DECLARE_REG(phys_addr_t, phys, host_ctxt, 1);
@@ -184,6 +192,7 @@ static const hcall_t host_hcall[] = {
 	HANDLE_FUNC(__pkvm_init),
 	HANDLE_FUNC(__pkvm_cpu_set_vector),
 	HANDLE_FUNC(__pkvm_host_share_hyp),
+	HANDLE_FUNC(__pkvm_host_unshare_hyp),
 	HANDLE_FUNC(__pkvm_create_private_mapping),
 	HANDLE_FUNC(__pkvm_prot_finalize),
 };
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -740,3 +740,162 @@ int __pkvm_host_share_hyp(u64 pfn, u64 nr_pages)
 	return ret;
 }
+
+static int host_initiate_unshare(struct pkvm_page_req *req)
+{
+	struct hyp_page *page = hyp_phys_to_page(req->phys);
+	enum kvm_pgtable_prot prot;
+
+	if (page->refcount > 1)
+		return 0;
+
+	prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_OWNED);
+	return host_stage2_idmap_locked(req->initiator.addr, PAGE_SIZE, prot);
+}
+
+static int initiate_unshare(struct pkvm_page_req *req,
+			    struct pkvm_mem_share *share)
+{
+	struct pkvm_mem_transition *tx = &share->tx;
+
+	switch (tx->initiator.id) {
+	case PKVM_ID_HOST:
+		return host_initiate_unshare(req);
+	default:
+		return -EINVAL;
+	}
+}
+
+static int hyp_complete_unshare(struct pkvm_page_req *req)
+{
+	struct hyp_page *page = hyp_phys_to_page(req->phys);
+	void *addr = (void *)req->completer.addr;
+	int ret = 0;
+
+	if (hyp_page_ref_dec_and_test(page)) {
+		ret = kvm_pgtable_hyp_unmap(&pkvm_pgtable, (u64)addr, PAGE_SIZE);
+		ret = (ret == PAGE_SIZE) ? 0 : -EINVAL;
+	}
+
+	return ret;
+}
+
+static int complete_unshare(struct pkvm_page_req *req,
+			    struct pkvm_mem_share *share)
+{
+	struct pkvm_mem_transition *tx = &share->tx;
+
+	switch (tx->completer.id) {
+	case PKVM_ID_HYP:
+		return hyp_complete_unshare(req);
+	default:
+		return -EINVAL;
+	}
+}
+
+static int check_unshare(struct pkvm_page_req *req,
+			 struct pkvm_page_share_ack *ack,
+			 struct pkvm_mem_share *share)
+{
+	struct pkvm_mem_transition *tx = &share->tx;
+
+	if (!addr_is_memory(req->phys))
+		return -EINVAL;
+
+	switch (tx->completer.id) {
+	case PKVM_ID_HYP:
+		return hyp_check_incoming_share(req, ack, tx->initiator.id,
+						share->prot);
+	default:
+		return -EPERM;
+	}
+}
+
+/*
+ * do_unshare():
+ *
+ * The page owner revokes access from another component for a range of
+ * pages which were previously shared using do_share().
+ *
+ * Initiator: SHARED_OWNED => OWNED
+ * Completer: SHARED_BORROWED => NOPAGE
+ */
+static int do_unshare(struct pkvm_mem_share *share)
+{
+	struct pkvm_page_req req;
+	int ret = 0;
+	u64 idx;
+
+	for (idx = 0; idx < share->tx.nr_pages; ++idx) {
+		struct pkvm_page_share_ack ack;
+
+		/*
+		 * Reuse request_share() and ack_share() from the normal share
+		 * path, as they implement all the checks we need here.
+		 * check_unshare(), however, must differ from the check used
+		 * on the share path: PKVM_PAGE_OWNED is illegal for the
+		 * initiator, whose pages must already be in the SHARED_OWNED
+		 * state.
+		 */
+		ret = request_share(&req, share, idx);
+		if (ret)
+			goto out;
+
+		ret = ack_share(&ack, &req, share);
+		if (ret)
+			goto out;
+
+		ret = check_unshare(&req, &ack, share);
+		if (ret)
+			goto out;
+	}
+
+	for (idx = 0; idx < share->tx.nr_pages; ++idx) {
+		ret = request_share(&req, share, idx);
+		if (ret)
+			break;
+
+		ret = initiate_unshare(&req, share);
+		if (ret)
+			break;
+
+		ret = complete_unshare(&req, share);
+		if (ret)
+			break;
+	}
+
+	WARN_ON(ret);
+out:
+	return ret;
+}
+
+int __pkvm_host_unshare_hyp(u64 pfn, u64 nr_pages)
+{
+	int ret;
+	u64 host_addr = hyp_pfn_to_phys(pfn);
+	u64 hyp_addr = (u64)__hyp_va(host_addr);
+	struct pkvm_mem_share share = {
+		.tx = {
+			.nr_pages = nr_pages,
+			.initiator = {
+				.id = PKVM_ID_HOST,
+				.addr = host_addr,
+				.host = {
+					.completer_addr = hyp_addr,
+				},
+			},
+			.completer = {
+				.id = PKVM_ID_HYP,
+			},
+		},
+		.prot = PAGE_HYP,
+	};
+
+	host_lock_component();
+	hyp_lock_component();
+
+	ret = do_unshare(&share);
+
+	hyp_unlock_component();
+	host_unlock_component();
+
+	return ret;
+}

Introduce an unshare hypercall which can be used to unmap things from
the hypervisor stage-1 in nVHE protected mode. This will be useful to
update the EL2 ownership state of pages during guest teardown, and
avoids keeping dangling mappings to unreferenced portions of memory.

Signed-off-by: Quentin Perret <qperret@google.com>
---
 arch/arm64/include/asm/kvm_asm.h              |   1 +
 arch/arm64/kvm/hyp/include/nvhe/mem_protect.h |   1 +
 arch/arm64/kvm/hyp/nvhe/hyp-main.c            |   9 +
 arch/arm64/kvm/hyp/nvhe/mem_protect.c         | 159 ++++++++++++++++++
 4 files changed, 170 insertions(+)
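
A quick usage sketch for reviewers, not part of the patch: the helper below
shows how host kernel code might invoke the new hypercall to unshare a single
page previously handed to EL2 via __pkvm_host_share_hyp(). The wrapper name
and the one-page granularity are illustrative assumptions; only
kvm_call_hyp_nvhe(), is_protected_kvm_enabled() and the new
__pkvm_host_unshare_hyp function ID come from existing code and this patch.

/*
 * Illustrative sketch only -- not part of this series. Assumes host
 * kernel (EL1) context with the usual KVM headers available.
 */
static int example_unshare_hyp_page(void *addr)
{
	u64 pfn = __phys_to_pfn(__pa(addr));

	/* The unshare hypercall is only meaningful in nVHE protected mode. */
	if (!is_protected_kvm_enabled())
		return 0;

	/* Revoke EL2's access to the single page backing @addr. */
	return kvm_call_hyp_nvhe(__pkvm_host_unshare_hyp, pfn, 1);
}

Note the two-pass structure of do_unshare() above: the first loop only
validates the whole range, and the second performs the actual state
transitions, so a failure in the second loop is unexpected once the checks
have passed, hence the WARN_ON().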