@@ -1060,27 +1060,27 @@ static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
local_irq_enable();
}
-static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, int min)
+static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
{
void *obj;
- if (cache->nobjs >= min)
+ if (mc->nobjs >= min)
return 0;
- while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
- if (cache->kmem_cache)
- obj = kmem_cache_zalloc(cache->kmem_cache, GFP_KERNEL_ACCOUNT);
+ while (mc->nobjs < ARRAY_SIZE(mc->objects)) {
+ if (mc->kmem_cache)
+ obj = kmem_cache_zalloc(mc->kmem_cache, GFP_KERNEL_ACCOUNT);
else
obj = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
if (!obj)
- return cache->nobjs >= min ? 0 : -ENOMEM;
- cache->objects[cache->nobjs++] = obj;
+ return mc->nobjs >= min ? 0 : -ENOMEM;
+ mc->objects[mc->nobjs++] = obj;
}
return 0;
}
-static int mmu_memory_cache_free_objects(struct kvm_mmu_memory_cache *cache)
+static int mmu_memory_cache_free_objects(struct kvm_mmu_memory_cache *mc)
{
- return cache->nobjs;
+ return mc->nobjs;
}
static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
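The rename above touches the helper that implements KVM's pre-allocation pattern: the page-fault path runs under the mmu_lock spinlock and so cannot call a sleeping allocator, so each vCPU fills a fixed-size cache with GFP_KERNEL_ACCOUNT allocations up front and later pops objects from it without sleeping. Below is a minimal userspace sketch of the same idea; obj_cache, cache_topup() and cache_alloc() are illustrative names, not kernel symbols, and calloc() stands in for kmem_cache_zalloc().

/*
 * Userspace sketch of the fixed-size object cache pattern seen in
 * mmu_topup_memory_cache() above. obj_cache, cache_topup() and
 * cache_alloc() are illustrative stand-ins, not kernel symbols.
 */
#include <stdlib.h>

#define CACHE_CAPACITY 40	/* the kernel uses ARRAY_SIZE(mc->objects) */
#define OBJ_SIZE 64		/* arbitrary object size for the sketch */

struct obj_cache {
	int nobjs;
	void *objects[CACHE_CAPACITY];
};

/* Fill the cache; succeeds as long as at least @min objects are held. */
static int cache_topup(struct obj_cache *mc, int min)
{
	void *obj;

	if (mc->nobjs >= min)
		return 0;
	while (mc->nobjs < CACHE_CAPACITY) {
		obj = calloc(1, OBJ_SIZE);	/* stand-in for kmem_cache_zalloc() */
		if (!obj)
			return mc->nobjs >= min ? 0 : -1;
		mc->objects[mc->nobjs++] = obj;
	}
	return 0;
}

/* Pop one pre-allocated object; never allocates, so it cannot sleep. */
static void *cache_alloc(struct obj_cache *mc)
{
	return mc->nobjs ? mc->objects[--mc->nobjs] : NULL;
}

The consuming side in the kernel (mmu_memory_cache_alloc(), not shown in this hunk) pops entries from mc->objects the same way.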
@@ -1395,10 +1395,10 @@ static struct kvm_rmap_head *gfn_to_rmap(struct kvm *kvm, gfn_t gfn,
static bool rmap_can_add(struct kvm_vcpu *vcpu)
{
- struct kvm_mmu_memory_cache *cache;
+ struct kvm_mmu_memory_cache *mc;
- cache = &vcpu->arch.mmu_pte_list_desc_cache;
- return mmu_memory_cache_free_objects(cache);
+ mc = &vcpu->arch.mmu_pte_list_desc_cache;
+ return mmu_memory_cache_free_objects(mc);
}
static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
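rmap_can_add() simply reports whether the per-vCPU pte_list_desc cache still holds objects, which lets a caller assert that an earlier topup covered the rmap entry it is about to create before rmap_add() draws from mmu_pte_list_desc_cache. A hedged sketch of that call ordering follows; set_spte_and_rmap() is a hypothetical caller, not a function from this file.

/*
 * Hypothetical caller (set_spte_and_rmap() is not a kernel symbol)
 * illustrating the intended ordering: the cache is topped up where
 * sleeping is allowed, and this path only consumes from it.
 */
static int set_spte_and_rmap(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
	/* Catch a missing or insufficient earlier topup. */
	if (WARN_ON_ONCE(!rmap_can_add(vcpu)))
		return -ENOMEM;

	return rmap_add(vcpu, spte, gfn);
}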