@@ -175,11 +175,7 @@ struct kvm_smccc_features {
struct kvm_protected_vm {
unsigned int shadow_handle;
struct mutex shadow_lock;
-
- struct {
- void *pgd;
- void *shadow;
- } hyp_donations;
+ struct kvm_hyp_memcache teardown_mc;
};

struct kvm_arch {
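For reference, the teardown_mc field that replaces the two hyp_donations pointers is cheap to embed: a hyp memcache is just a physical-address-linked list threaded through the pages it tracks. Below is a minimal sketch of the layout and of a push helper consistent with the push_hyp_memcache(mc, addr, hyp_virt_to_phys) calls further down; the field names and helper body are assumptions for illustration, not the in-tree definitions.

/*
 * Sketch only -- layout inferred from the call sites in this patch.
 * The memcache is an intrusive, singly-linked list of free pages: each
 * page's first word stores the physical address of the previously pushed
 * page, so host and hyp can both walk the list using their own
 * phys<->virt translation helper.
 */
struct kvm_hyp_memcache {
        phys_addr_t   head;       /* PA of the most recently pushed page */
        unsigned long nr_pages;   /* number of pages on the list */
};

static inline void push_hyp_memcache(struct kvm_hyp_memcache *mc, void *virt,
                                     phys_addr_t (*to_pa)(void *virt))
{
        *(phys_addr_t *)virt = mc->head;   /* link to the previous head */
        mc->head = to_pa(virt);
        mc->nr_pages++;
}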
@@ -76,7 +76,7 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
int hyp_pin_shared_mem(void *from, void *to);
void hyp_unpin_shared_mem(void *from, void *to);

-void reclaim_guest_pages(struct kvm_shadow_vm *vm);
+void reclaim_guest_pages(struct kvm_shadow_vm *vm, struct kvm_hyp_memcache *mc);

int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
struct kvm_hyp_memcache *host_mc);
@@ -260,19 +260,24 @@ int kvm_guest_prepare_stage2(struct kvm_shadow_vm *vm, void *pgd)
return 0;
}

-void reclaim_guest_pages(struct kvm_shadow_vm *vm)
+void reclaim_guest_pages(struct kvm_shadow_vm *vm, struct kvm_hyp_memcache *mc)
{
- unsigned long nr_pages, pfn;
-
- nr_pages = kvm_pgtable_stage2_pgd_size(vm->kvm.arch.vtcr) >> PAGE_SHIFT;
- pfn = hyp_virt_to_pfn(vm->pgt.pgd);
+ void *addr;

+ /* Dump all pgtable pages in the hyp_pool */
guest_lock_component(vm);
kvm_pgtable_stage2_destroy(&vm->pgt);
vm->kvm.arch.mmu.pgd_phys = 0ULL;
guest_unlock_component(vm);

- WARN_ON(__pkvm_hyp_donate_host(pfn, nr_pages));
+ /* Drain the hyp_pool into the memcache */
+ addr = hyp_alloc_pages(&vm->pool, 0);
+ while (addr) {
+ memset(hyp_virt_to_page(addr), 0, sizeof(struct hyp_page));
+ push_hyp_memcache(mc, addr, hyp_virt_to_phys);
+ WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(addr), 1));
+ addr = hyp_alloc_pages(&vm->pool, 0);
+ }
}

int __pkvm_prot_finalize(void)
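The ordering inside the new drain loop matters: the hypervisor scrubs its struct hyp_page vmemmap entry and threads the page onto the memcache (which writes a single link word into the page) while it still owns the page, and only then hands ownership back to the host, one page at a time. A commented restatement as a stand-alone helper; the helper name is hypothetical, everything it calls appears in the hunk above.

/* Hypothetical helper name; the body restates the drain loop above. */
static void drain_shadow_pool(struct kvm_shadow_vm *vm, struct kvm_hyp_memcache *mc)
{
        void *addr = hyp_alloc_pages(&vm->pool, 0);

        while (addr) {
                /* Scrub hyp's page metadata before releasing the page. */
                memset(hyp_virt_to_page(addr), 0, sizeof(struct hyp_page));
                /* Thread the page onto the teardown list (writes into the page). */
                push_hyp_memcache(mc, addr, hyp_virt_to_phys);
                /* Transfer ownership of the page back to the host. */
                WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(addr), 1));
                addr = hyp_alloc_pages(&vm->pool, 0);
        }
}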
@@ -546,8 +546,10 @@ int __pkvm_init_shadow(struct kvm *kvm, unsigned long shadow_hva,

int __pkvm_teardown_shadow(unsigned int shadow_handle)
{
+ struct kvm_hyp_memcache *mc;
struct kvm_shadow_vm *vm;
size_t shadow_size;
+ void *addr;
int err;

/* Lookup then remove entry from the shadow table. */
@@ -569,7 +571,8 @@ int __pkvm_teardown_shadow(unsigned int shadow_handle)
hyp_spin_unlock(&shadow_lock);

/* Reclaim guest pages (including page-table pages) */
- reclaim_guest_pages(vm);
+ mc = &vm->host_kvm->arch.pkvm.teardown_mc;
+ reclaim_guest_pages(vm, mc);
unpin_host_vcpus(vm->shadow_vcpu_states, vm->kvm.created_vcpus);

/* Push the metadata pages to the teardown memcache */
@@ -577,6 +580,9 @@ int __pkvm_teardown_shadow(unsigned int shadow_handle)
hyp_unpin_shared_mem(vm->host_kvm, vm->host_kvm + 1);

memset(vm, 0, shadow_size);
+ for (addr = vm; addr < (void *)vm + shadow_size; addr += PAGE_SIZE)
+ push_hyp_memcache(mc, addr, hyp_virt_to_phys);
+
unmap_donated_memory_noclear(vm, shadow_size);

return 0;
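The same zero-then-push pattern is applied to the shadow VM's own metadata pages: the donation is memset() first, each page is then linked onto the same teardown memcache (only the link word is written after the clear), and the hyp mapping is finally removed with unmap_donated_memory_noclear(), since a second clear would be redundant. Grouped as a hypothetical helper for illustration; the name and the page-alignment assumption are mine, the calls come from the hunk above.

/* Hypothetical helper; size is assumed page-aligned, as shadow_size is. */
static void teardown_donated_memory(struct kvm_hyp_memcache *mc, void *addr,
                                    size_t size)
{
        void *start = addr;

        /* Scrub the metadata, then queue every page for host reclaim. */
        memset(addr, 0, size);
        for (; addr < start + size; addr += PAGE_SIZE)
                push_hyp_memcache(mc, addr, hyp_virt_to_phys);

        /* Pages are already clear; only the mapping needs to go. */
        unmap_donated_memory_noclear(start, size);
}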
@@ -160,8 +160,6 @@ static int __kvm_shadow_create(struct kvm *kvm)

/* Store the shadow handle given by hyp for future call reference. */
kvm->arch.pkvm.shadow_handle = shadow_handle;
- kvm->arch.pkvm.hyp_donations.pgd = pgd;
- kvm->arch.pkvm.hyp_donations.shadow = shadow_addr;
return 0;

free_shadow:
@@ -185,20 +183,12 @@ int kvm_shadow_create(struct kvm *kvm)

void kvm_shadow_destroy(struct kvm *kvm)
{
- size_t pgd_sz, shadow_sz;
-
if (kvm->arch.pkvm.shadow_handle)
WARN_ON(kvm_call_hyp_nvhe(__pkvm_teardown_shadow,
kvm->arch.pkvm.shadow_handle));

kvm->arch.pkvm.shadow_handle = 0;
-
- shadow_sz = PAGE_ALIGN(KVM_SHADOW_VM_SIZE +
- KVM_SHADOW_VCPU_STATE_SIZE * kvm->created_vcpus);
- pgd_sz = kvm_pgtable_stage2_pgd_size(kvm->arch.vtcr);
-
- free_pages_exact(kvm->arch.pkvm.hyp_donations.shadow, shadow_sz);
- free_pages_exact(kvm->arch.pkvm.hyp_donations.pgd, pgd_sz);
+ free_hyp_memcache(&kvm->arch.pkvm.teardown_mc);
}

int kvm_init_pvm(struct kvm *kvm)
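On the host side, kvm_shadow_destroy() no longer needs to remember what it donated: it only has to walk the physical-address list handed back by EL2 and return every page to the page allocator, which is what the free_hyp_memcache() call above does. A minimal sketch of that walk under the list layout assumed earlier; the pop helper, the translation wrapper and the loop are illustrative, not the in-tree implementation.

/* Illustrative sketch only -- not the in-tree free_hyp_memcache(). */
static void *pop_hyp_memcache(struct kvm_hyp_memcache *mc,
                              void *(*to_va)(phys_addr_t phys))
{
        phys_addr_t *page;

        if (!mc->nr_pages)
                return NULL;

        page = to_va(mc->head);
        mc->head = *page;       /* follow the link stored in the page itself */
        mc->nr_pages--;

        return page;
}

static void *teardown_mc_to_va(phys_addr_t phys)
{
        return __va(phys);      /* host linear-map translation */
}

static void host_free_teardown_mc(struct kvm_hyp_memcache *mc)
{
        void *addr;

        while ((addr = pop_hyp_memcache(mc, teardown_mc_to_va)))
                free_page((unsigned long)addr);
}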