@@ -320,6 +320,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
vcpu->arch.target = -1;
bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
+ vcpu->arch.mmu_page_cache.capacity = KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE;
vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
/* Set up the timer */
@@ -764,7 +764,10 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
{
phys_addr_t addr;
int ret = 0;
- struct kvm_mmu_memory_cache cache = { 0, __GFP_ZERO, NULL, };
+ struct kvm_mmu_memory_cache cache = {
+ .capacity = KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE,
+ .gfp_zero = __GFP_ZERO,
+ };
struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;
enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_DEVICE |
KVM_PGTABLE_PROT_R |
@@ -387,6 +387,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
if (err)
goto out_free_gebase;
+ vcpu->arch.mmu_page_cache.capacity = KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE;
+
return 0;
out_free_gebase:
@@ -347,10 +347,10 @@ static int stage2_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
int ret = 0;
unsigned long pfn;
phys_addr_t addr, end;
- struct kvm_mmu_memory_cache pcache;
-
- memset(&pcache, 0, sizeof(pcache));
- pcache.gfp_zero = __GFP_ZERO;
+ struct kvm_mmu_memory_cache cache = {
+ .capacity = KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE,
+ .gfp_zero = __GFP_ZERO,
+ };
end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
pfn = __phys_to_pfn(hpa);
@@ -361,12 +361,12 @@ static int stage2_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
if (!writable)
pte = pte_wrprotect(pte);
- ret = kvm_mmu_topup_memory_cache(&pcache, stage2_pgd_levels);
+ ret = kvm_mmu_topup_memory_cache(&cache, stage2_pgd_levels);
if (ret)
goto out;
spin_lock(&kvm->mmu_lock);
- ret = stage2_set_pte(kvm, 0, &pcache, addr, &pte);
+ ret = stage2_set_pte(kvm, 0, &cache, addr, &pte);
spin_unlock(&kvm->mmu_lock);
if (ret)
goto out;
@@ -375,7 +375,7 @@ static int stage2_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
}
out:
- kvm_mmu_free_memory_cache(&pcache);
+ kvm_mmu_free_memory_cache(&cache);
return ret;
}
@@ -94,6 +94,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
/* Mark this VCPU never ran */
vcpu->arch.ran_atleast_once = false;
+ vcpu->arch.mmu_page_cache.capacity = KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE;
vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
/* Setup ISA features available to VCPU */
@@ -5719,12 +5719,21 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
int ret;
+ vcpu->arch.mmu_pte_list_desc_cache.capacity =
+ KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE;
vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO;
+ vcpu->arch.mmu_page_header_cache.capacity =
+ KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE;
vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;
+ vcpu->arch.mmu_shadowed_info_cache.capacity =
+ KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE;
+
+ vcpu->arch.mmu_shadow_page_cache.capacity =
+ KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE;
vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
vcpu->arch.mmu = &vcpu->arch.root_mmu;
@@ -83,14 +83,19 @@ struct gfn_to_pfn_cache {
* MMU flows is problematic, as is triggering reclaim, I/O, etc... while
* holding MMU locks. Note, these caches act more like prefetch buffers than
* classical caches, i.e. objects are not returned to the cache on being freed.
+ *
+ * The storage for the cache object pointers is allocated dynamically when the
+ * cache is topped up. The capacity field defines the number of object
+ * pointers the cache can hold.
*/
struct kvm_mmu_memory_cache {
int nobjs;
+ int capacity;
gfp_t gfp_zero;
struct kmem_cache *kmem_cache;
- void *objects[KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE];
+ void **objects;
};
-#endif
+#endif /* KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE */
#define HALT_POLL_HIST_COUNT 32
@@ -371,12 +371,23 @@ static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
{
+ gfp_t gfp = GFP_KERNEL_ACCOUNT;
void *obj;
if (mc->nobjs >= min)
return 0;
- while (mc->nobjs < ARRAY_SIZE(mc->objects)) {
- obj = mmu_memory_cache_alloc_obj(mc, GFP_KERNEL_ACCOUNT);
+
+ if (WARN_ON(mc->capacity == 0))
+ return -EINVAL;
+
+ if (!mc->objects) {
+ mc->objects = kvmalloc_array(sizeof(void *), mc->capacity, gfp);
+ if (!mc->objects)
+ return -ENOMEM;
+ }
+
+ while (mc->nobjs < mc->capacity) {
+ obj = mmu_memory_cache_alloc_obj(mc, gfp);
if (!obj)
return mc->nobjs >= min ? 0 : -ENOMEM;
mc->objects[mc->nobjs++] = obj;
@@ -397,6 +408,11 @@ void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
else
free_page((unsigned long)mc->objects[--mc->nobjs]);
}
+
+ kvfree(mc->objects);
+
+ /* Note, must set to NULL to avoid use-after-free in the next top-up. */
+ mc->objects = NULL;
}
void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
Allow the capacity of the kvm_mmu_memory_cache struct to be chosen at
declaration time rather than being fixed for all declarations. This will
be used in a follow-up commit to declare a cache in x86 with a capacity
of 512+ objects without having to increase the capacity of all caches in
KVM.

This change requires that each cache now specify its capacity at
runtime, since the cache struct itself no longer has a fixed capacity
known at compile time. To protect against someone accidentally defining
a kvm_mmu_memory_cache struct directly (without setting its capacity),
this commit includes a WARN_ON() in kvm_mmu_topup_memory_cache().

In order to support different capacities, this commit changes the
objects pointer array to be dynamically allocated the first time the
cache is topped up.

An alternative would be to lay out the objects array after the
kvm_mmu_memory_cache struct, which can be done at compile time. But that
change, unfortunately, adds some grottiness to arm64 and riscv, which
use a function-local (i.e. stack-allocated) kvm_mmu_memory_cache struct.
Since C does not allow anonymous structs in functions, the new wrapper
struct that contains kvm_mmu_memory_cache and the objects pointer array
must be named, which means dealing with an outer and inner struct. The
outer struct can't be dropped, since then there would be no guarantee
that the kvm_mmu_memory_cache struct and objects array would be laid out
consecutively on the stack.

No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
---
 arch/arm64/kvm/arm.c      |  1 +
 arch/arm64/kvm/mmu.c      |  5 ++++-
 arch/mips/kvm/mips.c      |  2 ++
 arch/riscv/kvm/mmu.c      | 14 +++++++-------
 arch/riscv/kvm/vcpu.c     |  1 +
 arch/x86/kvm/mmu/mmu.c    |  9 +++++++++
 include/linux/kvm_types.h |  9 +++++++--
 virt/kvm/kvm_main.c       | 20 ++++++++++++++++++--
 8 files changed, 49 insertions(+), 12 deletions(-)
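
For illustration, here is a minimal user-space sketch of the rejected
compile-time alternative described above. All names (mmu_memory_cache,
DEFINE_MMU_MEMORY_CACHE, cache_objects) are hypothetical stand-ins, not
code from this patch; the point is the named outer/inner struct and the
resulting ".cache" tax at every use site:

#include <assert.h>
#include <stdio.h>

struct mmu_memory_cache {
	int nobjs;
	int capacity;
	/* In this scheme the object pointers live directly after the struct. */
};

/*
 * The wrapper must be a named struct so it can be declared at function
 * scope, which is exactly the grottiness the message above describes:
 * every use site has to spell out pcache.cache.
 */
#define DEFINE_MMU_MEMORY_CACHE(name, cap)			\
	struct {						\
		struct mmu_memory_cache cache;			\
		void *objects[cap];				\
	} name = { .cache = { .capacity = (cap) } }

static void **cache_objects(struct mmu_memory_cache *mc)
{
	/* Only valid because the wrapper keeps the array adjacent. */
	return (void **)(mc + 1);
}

int main(void)
{
	DEFINE_MMU_MEMORY_CACHE(pcache, 40);

	/* The outer struct guarantees cache and objects[] are consecutive. */
	assert(cache_objects(&pcache.cache) == pcache.objects);
	cache_objects(&pcache.cache)[pcache.cache.nobjs++] = &pcache;
	printf("capacity=%d nobjs=%d\n",
	       pcache.cache.capacity, pcache.cache.nobjs);
	return 0;
}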
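
For contrast, a compact user-space model of the scheme this patch
adopts: capacity is chosen per declaration and the objects array is
allocated lazily on the first top-up. calloc()/free() stand in for
kvmalloc_array(), the page/kmem_cache allocators, and kvfree(); error
codes are simplified to -1:

#include <stdio.h>
#include <stdlib.h>

struct mmu_memory_cache {
	int nobjs;
	int capacity;	/* must be set before the first top-up */
	void **objects;	/* allocated lazily on the first top-up */
};

static int topup_memory_cache(struct mmu_memory_cache *mc, int min)
{
	if (mc->nobjs >= min)
		return 0;

	if (mc->capacity == 0)	/* the patch WARNs here */
		return -1;

	if (!mc->objects) {
		mc->objects = calloc(mc->capacity, sizeof(void *));
		if (!mc->objects)
			return -1;
	}

	while (mc->nobjs < mc->capacity) {
		void *obj = calloc(1, 64);	/* stand-in for a page/object */
		if (!obj)
			return mc->nobjs >= min ? 0 : -1;
		mc->objects[mc->nobjs++] = obj;
	}
	return 0;
}

static void free_memory_cache(struct mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free(mc->objects[--mc->nobjs]);
	free(mc->objects);
	mc->objects = NULL;	/* re-arm lazy allocation for the next top-up */
}

int main(void)
{
	/* Capacity is per declaration, e.g. a large cache for a follow-up. */
	struct mmu_memory_cache cache = { .capacity = 512 };

	if (topup_memory_cache(&cache, 8) == 0)
		printf("nobjs=%d capacity=%d\n", cache.nobjs, cache.capacity);
	free_memory_cache(&cache);
	return 0;
}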