Message ID | 20211201170411.1561936-14-qperret@google.com (mailing list archive)
---|---
State | New, archived
Series | KVM: arm64: Introduce kvm_share_hyp()
Reviewed-by: Andrew Walbran <qwandor@google.com>

On Wed, 1 Dec 2021 at 17:05, 'Quentin Perret' via kernel-team
<kernel-team@android.com> wrote:
>
> From: Will Deacon <will@kernel.org>
>
> Tearing down a previously shared memory region results in the borrower
> losing access to the underlying pages and returning them to the "owned"
> state in the owner.
>
> Implement a do_unshare() helper, along the same lines as do_share(), to
> provide this functionality for the host-to-hyp case.
>
> Signed-off-by: Will Deacon <will@kernel.org>
> Signed-off-by: Quentin Perret <qperret@google.com>
> ---
>  arch/arm64/kvm/hyp/nvhe/mem_protect.c | 115 ++++++++++++++++++++++++++
>  1 file changed, 115 insertions(+)
>
> diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> index 1282cbd6b9b3..43b25e2de780 100644
> --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> @@ -485,6 +485,16 @@ static int host_request_owned_transition(u64 *completer_addr,
>  	return __host_check_page_state_range(addr, size, PKVM_PAGE_OWNED);
>  }
>
> +static int host_request_unshare(u64 *completer_addr,
> +				const struct pkvm_mem_transition *tx)
> +{
> +	u64 size = tx->nr_pages * PAGE_SIZE;
> +	u64 addr = tx->initiator.addr;
> +
> +	*completer_addr = tx->initiator.host.completer_addr;
> +	return __host_check_page_state_range(addr, size, PKVM_PAGE_SHARED_OWNED);
> +}
> +
>  static int host_initiate_share(u64 *completer_addr,
>  			       const struct pkvm_mem_transition *tx)
>  {
> @@ -495,6 +505,16 @@ static int host_initiate_share(u64 *completer_addr,
>  	return __host_set_page_state_range(addr, size, PKVM_PAGE_SHARED_OWNED);
>  }
>
> +static int host_initiate_unshare(u64 *completer_addr,
> +				 const struct pkvm_mem_transition *tx)
> +{
> +	u64 size = tx->nr_pages * PAGE_SIZE;
> +	u64 addr = tx->initiator.addr;
> +
> +	*completer_addr = tx->initiator.host.completer_addr;
> +	return __host_set_page_state_range(addr, size, PKVM_PAGE_OWNED);
> +}
> +
>  static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte)
>  {
>  	if (!kvm_pte_valid(pte))
> @@ -535,6 +555,17 @@ static int hyp_ack_share(u64 addr, const struct pkvm_mem_transition *tx,
>  	return __hyp_check_page_state_range(addr, size, PKVM_NOPAGE);
>  }
>
> +static int hyp_ack_unshare(u64 addr, const struct pkvm_mem_transition *tx)
> +{
> +	u64 size = tx->nr_pages * PAGE_SIZE;
> +
> +	if (__hyp_ack_skip_pgtable_check(tx))
> +		return 0;
> +
> +	return __hyp_check_page_state_range(addr, size,
> +					    PKVM_PAGE_SHARED_BORROWED);
> +}
> +
>  static int hyp_complete_share(u64 addr, const struct pkvm_mem_transition *tx,
>  			      enum kvm_pgtable_prot perms)
>  {
> @@ -545,6 +576,14 @@ static int hyp_complete_share(u64 addr, const struct pkvm_mem_transition *tx,
>  	return pkvm_create_mappings_locked(start, end, prot);
>  }
>
> +static int hyp_complete_unshare(u64 addr, const struct pkvm_mem_transition *tx)
> +{
> +	u64 size = tx->nr_pages * PAGE_SIZE;
> +	int ret = kvm_pgtable_hyp_unmap(&pkvm_pgtable, addr, size);
> +
> +	return (ret != size) ? -EFAULT : 0;
> +}
> +
>  static int check_share(struct pkvm_mem_share *share)
>  {
>  	const struct pkvm_mem_transition *tx = &share->tx;
> @@ -621,6 +660,82 @@ static int do_share(struct pkvm_mem_share *share)
>  	return WARN_ON(__do_share(share));
>  }
>
> +static int check_unshare(struct pkvm_mem_share *share)
> +{
> +	const struct pkvm_mem_transition *tx = &share->tx;
> +	u64 completer_addr;
> +	int ret;
> +
> +	switch (tx->initiator.id) {
> +	case PKVM_ID_HOST:
> +		ret = host_request_unshare(&completer_addr, tx);
> +		break;
> +	default:
> +		ret = -EINVAL;
> +	}
> +
> +	if (ret)
> +		return ret;
> +
> +	switch (tx->completer.id) {
> +	case PKVM_ID_HYP:
> +		ret = hyp_ack_unshare(completer_addr, tx);
> +		break;
> +	default:
> +		ret = -EINVAL;
> +	}
> +
> +	return ret;
> +}
> +
> +static int __do_unshare(struct pkvm_mem_share *share)
> +{
> +	const struct pkvm_mem_transition *tx = &share->tx;
> +	u64 completer_addr;
> +	int ret;
> +
> +	switch (tx->initiator.id) {
> +	case PKVM_ID_HOST:
> +		ret = host_initiate_unshare(&completer_addr, tx);
> +		break;
> +	default:
> +		ret = -EINVAL;
> +	}
> +
> +	if (ret)
> +		return ret;
> +
> +	switch (tx->completer.id) {
> +	case PKVM_ID_HYP:
> +		ret = hyp_complete_unshare(completer_addr, tx);
> +		break;
> +	default:
> +		ret = -EINVAL;
> +	}
> +
> +	return ret;
> +}
> +
> +/*
> + * do_unshare():
> + *
> + * The page owner revokes access from another component for a range of
> + * pages which were previously shared using do_share().
> + *
> + * Initiator: SHARED_OWNED	=> OWNED
> + * Completer: SHARED_BORROWED	=> NOPAGE
> + */
> +static int do_unshare(struct pkvm_mem_share *share)
> +{
> +	int ret;
> +
> +	ret = check_unshare(share);
> +	if (ret)
> +		return ret;
> +
> +	return WARN_ON(__do_unshare(share));
> +}
> +
>  int __pkvm_host_share_hyp(u64 pfn)
>  {
>  	int ret;
> --
> 2.34.0.rc2.393.gf8c9666880-goog
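For context beyond the quoted patch: do_unshare() only becomes reachable once a host-facing entry point builds a pkvm_mem_transition describing the host-to-hyp unshare and calls it under the appropriate locks. The patch deliberately stops short of adding that entry point, so the sketch below is illustrative only. It mirrors the existing __pkvm_host_share_hyp() pattern from the same file; the function name and the host_lock_component()/hyp_lock_component() lock helpers are assumptions about the surrounding tree, not part of this patch.

```c
/*
 * Illustrative sketch only, not part of the patch above. A host-facing
 * unshare entry point would be expected to mirror __pkvm_host_share_hyp():
 * describe the transition (host initiator, hyp completer), take both
 * component locks, and run the check/commit sequence via do_unshare().
 * host_lock_component()/hyp_lock_component() are assumed lock helpers.
 */
int __pkvm_host_unshare_hyp(u64 pfn)
{
	int ret;
	u64 host_addr = hyp_pfn_to_phys(pfn);
	u64 hyp_addr = (u64)__hyp_va(host_addr);
	struct pkvm_mem_share share = {
		.tx	= {
			.nr_pages	= 1,
			.initiator	= {
				.id	= PKVM_ID_HOST,
				.addr	= host_addr,
				.host	= {
					.completer_addr = hyp_addr,
				},
			},
			.completer	= {
				.id	= PKVM_ID_HYP,
			},
		},
	};

	host_lock_component();
	hyp_lock_component();

	ret = do_unshare(&share);

	hyp_unlock_component();
	host_unlock_component();

	return ret;
}
```

Note how check_unshare() runs to completion before any state is modified: a failed page-state check returns early and leaves both the host stage-2 and the hyp page tables untouched. The WARN_ON() in do_unshare() then asserts that the commit phase cannot fail once the checks have passed.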
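On the kernel (EL1) side, the natural consumer of such a hypercall is a loop over the pages of a previously shared object, issuing one unshare call per pfn. A hedged sketch, assuming __pkvm_host_unshare_hyp from the sketch above has been registered as an nVHE hypercall; the helper name and its placement are assumptions, not part of this patch:

```c
/*
 * Hypothetical EL1-side helper: revoke the hypervisor's access to a
 * physically contiguous range, one page at a time. Assumes the
 * __pkvm_host_unshare_hyp hypercall from the sketch above exists in
 * the nVHE hyp dispatch table.
 */
static int unshare_pfn_range_with_hyp(phys_addr_t start, phys_addr_t end)
{
	u64 pfn;
	int ret;

	for (pfn = __phys_to_pfn(start); pfn < __phys_to_pfn(end); pfn++) {
		ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_hyp, pfn);
		if (ret)
			return ret;
	}

	return 0;
}
```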