@@ -91,17 +91,6 @@ struct kvm_arch {
u32 psci_version;
};
-#define KVM_NR_MEM_OBJS 40
-
-/*
- * We don't want allocation failures within the mmu code, so we preallocate
- * enough memory for a single page fault in a cache.
- */
-struct kvm_mmu_memory_cache {
- int nobjs;
- void *objects[KVM_NR_MEM_OBJS];
-};
-
struct kvm_vcpu_fault_info {
u32 hsr; /* Hyp Syndrome Register */
u32 hxfar; /* Hyp Data/Inst. Fault Address Register */
@@ -210,7 +199,7 @@ struct kvm_vcpu_arch {
struct kvm_decode mmio_decode;
/* Cache some mmu pages needed inside spinlock regions */
- struct kvm_mmu_memory_cache mmu_page_cache;
+ struct kvm_mmu_memcache mmu_page_cache;
struct vcpu_reset_state reset_state;
@@ -71,7 +71,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
-void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
+void kvm_mmu_free_memcaches(struct kvm_vcpu *vcpu);
phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
@@ -2,4 +2,10 @@
#ifndef _ASM_ARM_KVM_TYPES_H
#define _ASM_ARM_KVM_TYPES_H
+#define KVM_ARCH_WANT_MMU_MEMCACHE
+
+#define KVM_MMU_NR_MEMCACHE_OBJS 40
+
+#define KVM_MMU_CACHE_GFP_FLAGS __GFP_ZERO
+
#endif /* _ASM_ARM_KVM_TYPES_H */
@@ -96,17 +96,6 @@ struct kvm_arch {
u32 psci_version;
};
-#define KVM_NR_MEM_OBJS 40
-
-/*
- * We don't want allocation failures within the mmu code, so we preallocate
- * enough memory for a single page fault in a cache.
- */
-struct kvm_mmu_memory_cache {
- int nobjs;
- void *objects[KVM_NR_MEM_OBJS];
-};
-
struct kvm_vcpu_fault_info {
u32 esr_el2; /* Hyp Syndrome Register */
u64 far_el2; /* Hyp Fault Address Register */
@@ -331,7 +320,7 @@ struct kvm_vcpu_arch {
struct kvm_decode mmio_decode;
/* Cache some mmu pages needed inside spinlock regions */
- struct kvm_mmu_memory_cache mmu_page_cache;
+ struct kvm_mmu_memcache mmu_page_cache;
/* Target CPU and feature flags */
int target;
@@ -171,7 +171,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
-void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
+void kvm_mmu_free_memcaches(struct kvm_vcpu *vcpu);
phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
@@ -2,5 +2,11 @@
#ifndef _ASM_ARM64_KVM_TYPES_H
#define _ASM_ARM64_KVM_TYPES_H
+#define KVM_ARCH_WANT_MMU_MEMCACHE
+
+#define KVM_MMU_NR_MEMCACHE_OBJS 40
+
+#define KVM_MMU_CACHE_GFP_FLAGS __GFP_ZERO
+
#endif /* _ASM_ARM64_KVM_TYPES_H */
@@ -313,7 +313,7 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
if (vcpu->arch.has_run_once && unlikely(!irqchip_in_kernel(vcpu->kvm)))
static_branch_dec(&userspace_irqchip_in_use);
- kvm_mmu_free_memory_caches(vcpu);
+ kvm_mmu_free_memcaches(vcpu);
kvm_timer_vcpu_terminate(vcpu);
kvm_pmu_vcpu_destroy(vcpu);
kvm_vcpu_uninit(vcpu);
@@ -132,38 +132,6 @@ static void stage2_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp)
put_page(virt_to_page(pudp));
}
-static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
- int min, int max)
-{
- void *page;
-
- BUG_ON(max > KVM_NR_MEM_OBJS);
- if (cache->nobjs >= min)
- return 0;
- while (cache->nobjs < max) {
- page = (void *)__get_free_page(PGALLOC_GFP);
- if (!page)
- return -ENOMEM;
- cache->objects[cache->nobjs++] = page;
- }
- return 0;
-}
-
-static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
-{
- while (mc->nobjs)
- free_page((unsigned long)mc->objects[--mc->nobjs]);
-}
-
-static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
-{
- void *p;
-
- BUG_ON(!mc || !mc->nobjs);
- p = mc->objects[--mc->nobjs];
- return p;
-}
-
static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
{
pud_t *pud_table __maybe_unused = stage2_pud_offset(kvm, pgd, 0UL);
@@ -1020,7 +988,7 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
free_pages_exact(pgd, stage2_pgd_size(kvm));
}
-static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memcache *cache,
phys_addr_t addr)
{
pgd_t *pgd;
@@ -1030,7 +998,7 @@ static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
if (stage2_pgd_none(kvm, *pgd)) {
if (!cache)
return NULL;
- pud = mmu_memory_cache_alloc(cache);
+ pud = kvm_mmu_memcache_alloc(cache);
stage2_pgd_populate(kvm, pgd, pud);
get_page(virt_to_page(pgd));
}
@@ -1038,7 +1006,7 @@ static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
return stage2_pud_offset(kvm, pgd, addr);
}
-static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memcache *cache,
phys_addr_t addr)
{
pud_t *pud;
@@ -1051,7 +1019,7 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
if (stage2_pud_none(kvm, *pud)) {
if (!cache)
return NULL;
- pmd = mmu_memory_cache_alloc(cache);
+ pmd = kvm_mmu_memcache_alloc(cache);
stage2_pud_populate(kvm, pud, pmd);
get_page(virt_to_page(pud));
}
@@ -1059,7 +1027,7 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
return stage2_pmd_offset(kvm, pud, addr);
}
-static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
+static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memcache
*cache, phys_addr_t addr, const pmd_t *new_pmd)
{
pmd_t *pmd, old_pmd;
@@ -1123,7 +1091,7 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
return 0;
}
-static int stage2_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+static int stage2_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memcache *cache,
phys_addr_t addr, const pud_t *new_pudp)
{
pud_t *pudp, old_pud;
@@ -1225,7 +1193,7 @@ static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
return kvm_s2pte_exec(ptep);
}
-static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memcache *cache,
phys_addr_t addr, const pte_t *new_pte,
unsigned long flags)
{
@@ -1257,7 +1225,7 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
if (stage2_pud_none(kvm, *pud)) {
if (!cache)
return 0; /* ignore calls from kvm_set_spte_hva */
- pmd = mmu_memory_cache_alloc(cache);
+ pmd = kvm_mmu_memcache_alloc(cache);
stage2_pud_populate(kvm, pud, pmd);
get_page(virt_to_page(pud));
}
@@ -1282,7 +1250,7 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
if (pmd_none(*pmd)) {
if (!cache)
return 0; /* ignore calls from kvm_set_spte_hva */
- pte = mmu_memory_cache_alloc(cache);
+ pte = kvm_mmu_memcache_alloc(cache);
kvm_pmd_populate(pmd, pte);
get_page(virt_to_page(pmd));
}
@@ -1349,7 +1317,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
phys_addr_t addr, end;
int ret = 0;
unsigned long pfn;
- struct kvm_mmu_memory_cache cache = { 0, };
+ struct kvm_mmu_memcache cache = { 0, };
end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
pfn = __phys_to_pfn(pa);
@@ -1360,9 +1328,8 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
if (writable)
pte = kvm_s2pte_mkwrite(pte);
- ret = mmu_topup_memory_cache(&cache,
- kvm_mmu_cache_min_pages(kvm),
- KVM_NR_MEM_OBJS);
+ ret = kvm_mmu_topup_memcache_page(&cache,
+ kvm_mmu_cache_min_pages(kvm));
if (ret)
goto out;
spin_lock(&kvm->mmu_lock);
@@ -1376,7 +1343,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
}
out:
- mmu_free_memory_cache(&cache);
+ kvm_mmu_free_memcache_page(&cache);
return ret;
}
@@ -1683,7 +1650,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
unsigned long mmu_seq;
gfn_t gfn = fault_ipa >> PAGE_SHIFT;
struct kvm *kvm = vcpu->kvm;
- struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
+ struct kvm_mmu_memcache *memcache = &vcpu->arch.mmu_page_cache;
struct vm_area_struct *vma;
kvm_pfn_t pfn;
pgprot_t mem_type = PAGE_S2;
@@ -1728,8 +1695,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
up_read(&current->mm->mmap_sem);
/* We need minimum second+third level pages */
- ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm),
- KVM_NR_MEM_OBJS);
+ ret = kvm_mmu_topup_memcache_page(memcache, kvm_mmu_cache_min_pages(kvm));
if (ret)
return ret;
@@ -2149,9 +2115,9 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
}
-void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
+void kvm_mmu_free_memcaches(struct kvm_vcpu *vcpu)
{
- mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+ kvm_mmu_free_memcache_page(&vcpu->arch.mmu_page_cache);
}
phys_addr_t kvm_mmu_get_httbr(void)
Now that we have a common mmu memcache implementation, we can reuse this
for arm and arm64.

The common implementation has a slightly different behavior when
allocating objects under high memory pressure; whereas the current
arm/arm64 implementation will give up and return -ENOMEM if the full
size of the cache cannot be allocated during topup, the common
implementation is happy with any allocation between min and max. There
should be no architecture-specific requirement for doing it one way or
the other, and it's in fact better to enforce a cross-architecture KVM
policy on this behavior.

Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
---
 arch/arm/include/asm/kvm_host.h    | 13 +-----
 arch/arm/include/asm/kvm_mmu.h     |  2 +-
 arch/arm/include/asm/kvm_types.h   |  6 +++
 arch/arm64/include/asm/kvm_host.h  | 13 +-----
 arch/arm64/include/asm/kvm_mmu.h   |  2 +-
 arch/arm64/include/asm/kvm_types.h |  6 +++
 virt/kvm/arm/arm.c                 |  2 +-
 virt/kvm/arm/mmu.c                 | 68 ++++++++----------------
 8 files changed, 34 insertions(+), 78 deletions(-)
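
As a rough sketch of the topup policy difference described above (the
field names nobjs/objects, the GFP flags, and the helper name below are
illustrative assumptions, not the series' actual code), the common
topup is expected to behave along these lines:

	/*
	 * Illustrative sketch only, not the common implementation itself.
	 *
	 * Old arm/arm64 policy: return -ENOMEM unless the cache can be
	 * filled all the way to max (KVM_NR_MEM_OBJS).
	 * Common policy: keep whatever was allocated and succeed as long
	 * as at least 'min' objects ended up in the cache.
	 */
	static int sketch_topup_memcache_page(struct kvm_mmu_memcache *mc,
					      int min)
	{
		void *page;

		if (mc->nobjs >= min)
			return 0;
		while (mc->nobjs < KVM_MMU_NR_MEMCACHE_OBJS) {
			page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT |
						       KVM_MMU_CACHE_GFP_FLAGS);
			if (!page)
				/* A partial fill is fine once 'min' is reached. */
				return mc->nobjs >= min ? 0 : -ENOMEM;
			mc->objects[mc->nobjs++] = page;
		}
		return 0;
	}

With this policy the callers in kvm_phys_addr_ioremap() and
user_mem_abort() only pass kvm_mmu_cache_min_pages(kvm) and no longer
name an explicit maximum, as the diff above shows.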