Message ID | 20211019121304.2732332-8-qperret@google.com (mailing list archive) |
---|---|
State | New, archived |
Series | KVM: arm64: pkvm: Implement unshare hypercall |
Hi Quentin,

> diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> index 0019b2309f70..0cc4b295e525 100644
> --- a/arch/arm64/kvm/mmu.c
> +++ b/arch/arm64/kvm/mmu.c
> @@ -299,6 +299,17 @@ static int pkvm_share_hyp(phys_addr_t start, phys_addr_t end)
>  	return 0;
>  }
>  
> +int kvm_share_hyp(void *from, void *to)
> +{
> +	if (is_kernel_in_hyp_mode())
> +		return 0;
> +
> +	if (kvm_host_owns_hyp_mappings())
> +		return create_hyp_mappings(from, to, PAGE_HYP);
> +
> +	return pkvm_share_hyp(kvm_kaddr_to_phys(from), kvm_kaddr_to_phys(to));

We should be careful about vmalloc memory here. kvm_kaddr_to_phys() will
happily return the physical address, but the range is not guaranteed to be
physically contiguous. It doesn't look like this series ever shares vmalloc
memory, but we should make it harder for users of this function to shoot
themselves in the foot.

One option would be to turn this into a loop and call pkvm_share_hyp() on
each physical page. But since the hypervisor has no means of making those
pages virtually contiguous anyway, that is probably not the right approach.

We could make it possible to share vmalloc buffers that fit into a single
page and reject the others. There we still need to be careful about the
upper bound, because the way it's written now, 'to' can be treated as
exclusive and 'kvm_kaddr_to_phys(to)' would not always return the page
after 'kvm_kaddr_to_phys(from)'.

-David
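For illustration only, a rough sketch of the per-page loop mentioned above.
It is not part of the patch: the helper name is made up, and it assumes the
code would sit in arch/arm64/kvm/mmu.c next to kvm_share_hyp(), where
pkvm_share_hyp() and its (start, end) physical-address interface are visible.

/*
 * Hypothetical sketch only: share a possibly vmalloc'ed range with the
 * hypervisor one page at a time, resolving each page's physical address
 * individually instead of assuming the range is physically contiguous.
 * 'to' is treated as an exclusive end, hence the PAGE_ALIGN() rounding.
 */
static int share_hyp_page_by_page(void *from, void *to)
{
	unsigned long start = (unsigned long)from & PAGE_MASK;
	unsigned long end = PAGE_ALIGN((unsigned long)to);
	unsigned long addr;
	int ret;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		phys_addr_t phys;

		if (is_vmalloc_addr((void *)addr))
			phys = page_to_phys(vmalloc_to_page((void *)addr));
		else
			phys = __pa(addr);

		ret = pkvm_share_hyp(phys, phys + PAGE_SIZE);
		if (ret)
			return ret;
	}

	return 0;
}

Even with such a loop the hypervisor would still see physically scattered
pages, which is why the single-page restriction discussed above may be the
more sensible option.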
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 02d378887743..185d0f62b724 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -150,6 +150,7 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
 #include <asm/kvm_pgtable.h>
 #include <asm/stage2_pgtable.h>
 
+int kvm_share_hyp(void *from, void *to);
 int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot);
 int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
 			   void __iomem **kaddr,
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index e1cc3f8560e7..5312d5a2ef69 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -146,7 +146,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	if (ret)
 		return ret;
 
-	ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
+	ret = kvm_share_hyp(kvm, kvm + 1);
 	if (ret)
 		goto out_free_stage2_pgd;
 
@@ -343,7 +343,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	if (err)
 		return err;
 
-	return create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP);
+	return kvm_share_hyp(vcpu, vcpu + 1);
 }
 
 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index 62c0d78da7be..2fe1128d9f3d 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -35,11 +35,11 @@ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
 	 * Make sure the host task thread flags and fpsimd state are
 	 * visible to hyp:
 	 */
-	ret = create_hyp_mappings(ti, ti + 1, PAGE_HYP);
+	ret = kvm_share_hyp(ti, ti + 1);
 	if (ret)
 		goto error;
 
-	ret = create_hyp_mappings(fpsimd, fpsimd + 1, PAGE_HYP);
+	ret = kvm_share_hyp(fpsimd, fpsimd + 1);
 	if (ret)
 		goto error;
 
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 0019b2309f70..0cc4b295e525 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -299,6 +299,17 @@ static int pkvm_share_hyp(phys_addr_t start, phys_addr_t end)
 	return 0;
 }
 
+int kvm_share_hyp(void *from, void *to)
+{
+	if (is_kernel_in_hyp_mode())
+		return 0;
+
+	if (kvm_host_owns_hyp_mappings())
+		return create_hyp_mappings(from, to, PAGE_HYP);
+
+	return pkvm_share_hyp(kvm_kaddr_to_phys(from), kvm_kaddr_to_phys(to));
+}
+
 /**
  * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
  * @from:	The virtual kernel start address of the range
@@ -319,12 +330,8 @@ int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
 	if (is_kernel_in_hyp_mode())
 		return 0;
 
-	if (!kvm_host_owns_hyp_mappings()) {
-		if (WARN_ON(prot != PAGE_HYP))
-			return -EPERM;
-		return pkvm_share_hyp(kvm_kaddr_to_phys(from),
-				      kvm_kaddr_to_phys(to));
-	}
+	if (!kvm_host_owns_hyp_mappings())
+		return -EPERM;
 
 	start = start & PAGE_MASK;
 	end = PAGE_ALIGN(end);
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 59e4c6e819e9..cf781ec9212a 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -113,7 +113,7 @@ static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
 	if (!buf)
 		return -ENOMEM;
 
-	ret = create_hyp_mappings(buf, buf + reg_sz, PAGE_HYP);
+	ret = kvm_share_hyp(buf, buf + reg_sz);
 	if (ret) {
 		kfree(buf);
 		return ret;
The create_hyp_mappings() function can currently be called at any point
in time. However, its behaviour in protected mode changes widely
depending on when it is being called. Prior to KVM init, it is used to
create the temporary page-table used to bring-up the hypervisor, and
later on it is transparently turned into a 'share' hypercall when the
kernel has lost control over the hypervisor stage-1. In order to
prepare the ground for also unsharing pages with the hypervisor during
guest teardown, introduce a kvm_share_hyp() function to make it clear
in which places a share hypercall should be expected, as we will soon
need a matching unshare hypercall in all those places.

Signed-off-by: Quentin Perret <qperret@google.com>
---
 arch/arm64/include/asm/kvm_mmu.h |  1 +
 arch/arm64/kvm/arm.c             |  4 ++--
 arch/arm64/kvm/fpsimd.c          |  4 ++--
 arch/arm64/kvm/mmu.c             | 19 +++++++++++++------
 arch/arm64/kvm/reset.c           |  2 +-
 5 files changed, 19 insertions(+), 11 deletions(-)
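The matching unshare wrapper the commit message alludes to could plausibly
mirror kvm_share_hyp(). The sketch below is purely illustrative:
kvm_unshare_hyp() and pkvm_unshare_hyp() are assumed names for later work
in this series, not anything introduced by this patch.

/*
 * Illustrative sketch only: pkvm_unshare_hyp() is an assumed counterpart
 * to pkvm_share_hyp() and is not defined in this patch.
 */
int kvm_unshare_hyp(void *from, void *to)
{
	/* Nothing to undo while the kernel still owns the hyp mappings. */
	if (is_kernel_in_hyp_mode() || kvm_host_owns_hyp_mappings())
		return 0;

	return pkvm_unshare_hyp(kvm_kaddr_to_phys(from), kvm_kaddr_to_phys(to));
}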