@@ -1060,6 +1060,15 @@ static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
	local_irq_enable();
}

+static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
+					       gfp_t gfp_flags)
+{
+	if (mc->kmem_cache)
+		return kmem_cache_zalloc(mc->kmem_cache, gfp_flags);
+	else
+		return (void *)__get_free_page(gfp_flags);
+}
+
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
{
	void *obj;
@@ -1067,10 +1076,7 @@ static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
	if (mc->nobjs >= min)
		return 0;
	while (mc->nobjs < ARRAY_SIZE(mc->objects)) {
-		if (mc->kmem_cache)
-			obj = kmem_cache_zalloc(mc->kmem_cache, GFP_KERNEL_ACCOUNT);
-		else
-			obj = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
+		obj = mmu_memory_cache_alloc_obj(mc, GFP_KERNEL_ACCOUNT);
		if (!obj)
			return mc->nobjs >= min ? 0 : -ENOMEM;
		mc->objects[mc->nobjs++] = obj;
@@ -1118,8 +1124,11 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

-	BUG_ON(!mc->nobjs);
-	p = mc->objects[--mc->nobjs];
+	if (WARN_ON(!mc->nobjs))
+		p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT);
+	else
+		p = mc->objects[--mc->nobjs];
+	BUG_ON(!p);
	return p;
}
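
The last hunk only changes behavior when mmu_memory_cache_alloc() finds the cache under-filled, i.e. when the earlier topup underestimated how many objects the fault path would consume. Below is a minimal userspace sketch of that discipline, not kernel code: the names (obj_cache, cache_topup, cache_alloc) are invented for illustration, and calloc() stands in both for kmem_cache_zalloc()/__get_free_page() and for the GFP_ATOMIC fallback. It shows that, with the warn-and-allocate fallback, draining the cache past its topped-up depth degrades to a warning plus a direct allocation instead of a fatal assertion.

#include <stdio.h>
#include <stdlib.h>

#define CACHE_CAPACITY 4

struct obj_cache {
	int nobjs;
	void *objects[CACHE_CAPACITY];
};

/* Fill the cache to capacity; analogous to mmu_topup_memory_cache(). */
static int cache_topup(struct obj_cache *mc, int min)
{
	if (mc->nobjs >= min)
		return 0;
	while (mc->nobjs < CACHE_CAPACITY) {
		void *obj = calloc(1, 64);	/* stand-in for kmem_cache_zalloc() */

		if (!obj)
			return mc->nobjs >= min ? 0 : -1;
		mc->objects[mc->nobjs++] = obj;
	}
	return 0;
}

/* Pop an object; warn and allocate directly instead of aborting when empty. */
static void *cache_alloc(struct obj_cache *mc)
{
	if (!mc->nobjs) {
		fprintf(stderr, "cache unexpectedly empty, allocating inline\n");
		return calloc(1, 64);	/* stand-in for the GFP_ATOMIC fallback */
	}
	return mc->objects[--mc->nobjs];
}

int main(void)
{
	struct obj_cache mc = { 0 };

	if (cache_topup(&mc, 2))
		return 1;

	/* Drain past the topped-up depth: the last two calls hit the fallback. */
	for (int i = 0; i < CACHE_CAPACITY + 2; i++) {
		void *p = cache_alloc(&mc);

		if (!p)		/* analogue of the patch's BUG_ON(!p) */
			return 1;
		free(p);
	}
	return 0;
}

Note that the patch keeps a BUG_ON(!p) after the fallback, so the host still dies on a genuine allocation failure; only the bookkeeping case (an empty cache) is downgraded from BUG to WARN.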