@@ -1369,8 +1369,7 @@ static void kvm_release_vm_pages(struct kvm *kvm)
unsigned long base_gfn;
slots = kvm_memslots(kvm);
- for (i = 0; i < slots->nmemslots; i++) {
- memslot = &slots->memslots[i];
+ kvm_for_each_memslot(slots, memslot, i) {
base_gfn = memslot->base_gfn;
for (j = 0; j < memslot->npages; j++) {
@@ -874,11 +874,11 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
int ret;
int retval = 0;
struct kvm_memslots *slots;
+ struct kvm_memory_slot *memslot;
slots = kvm_memslots(kvm);
- for (i = 0; i < slots->nmemslots; i++) {
- struct kvm_memory_slot *memslot = &slots->memslots[i];
+ kvm_for_each_memslot(slots, memslot, i) {
unsigned long start = memslot->userspace_addr;
unsigned long end;
@@ -3671,11 +3671,12 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
unsigned int nr_mmu_pages;
unsigned int nr_pages = 0;
struct kvm_memslots *slots;
+ struct kvm_memory_slot *memslot;
slots = kvm_memslots(kvm);
- for (i = 0; i < slots->nmemslots; i++)
- nr_pages += slots->memslots[i].npages;
+ kvm_for_each_memslot(slots, memslot, i)
+ nr_pages += memslot->npages;
nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
nr_mmu_pages = max(nr_mmu_pages,
@@ -3252,7 +3252,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
goto out;
memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;
- memslots_updated(slots, log->slot);
+ memslots_updated(slots);
old_slots = kvm->memslots;
rcu_assign_pointer(kvm->memslots, slots);
@@ -222,7 +222,6 @@ struct kvm_irq_routing_table {};
#endif
struct kvm_memslots {
- int nmemslots;
int used_slots;
u64 generation;
struct kvm_memory_slot *slot_cache;
@@ -302,6 +301,10 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
idx < atomic_read(&kvm->online_vcpus) && vcpup; \
vcpup = kvm_get_vcpu(kvm, ++idx))
+#define kvm_for_each_memslot(memslots, memslot, i) \
+ for (i = 0; i < (memslots)->used_slots && \
+ ({ memslot = (memslots)->slots_sort[i]; 1; }); i++)
+
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
@@ -334,7 +337,7 @@ int is_error_pfn(pfn_t pfn);
int is_hwpoison_pfn(pfn_t pfn);
int is_fault_pfn(pfn_t pfn);
int kvm_is_error_hva(unsigned long addr);
-void memslots_updated(struct kvm_memslots *slots, int slot_id);
+void memslots_updated(struct kvm_memslots *slots);
int kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
int user_alloc);
@@ -128,12 +128,13 @@ static int kvm_iommu_map_memslots(struct kvm *kvm)
{
int i, idx, r = 0;
struct kvm_memslots *slots;
+ struct kvm_memory_slot *memslot;
idx = srcu_read_lock(&kvm->srcu);
slots = kvm_memslots(kvm);
- for (i = 0; i < slots->nmemslots; i++) {
- r = kvm_iommu_map_pages(kvm, &slots->memslots[i]);
+ kvm_for_each_memslot(slots, memslot, i) {
+ r = kvm_iommu_map_pages(kvm, memslot);
if (r)
break;
}
@@ -289,14 +290,14 @@ static int kvm_iommu_unmap_memslots(struct kvm *kvm)
{
int i, idx;
struct kvm_memslots *slots;
+ struct kvm_memory_slot *memslot;
idx = srcu_read_lock(&kvm->srcu);
slots = kvm_memslots(kvm);
- for (i = 0; i < slots->nmemslots; i++) {
- kvm_iommu_put_pages(kvm, slots->memslots[i].base_gfn,
- slots->memslots[i].npages);
- }
+ kvm_for_each_memslot(slots, memslot, i)
+ kvm_iommu_put_pages(kvm, memslot->base_gfn, memslot->npages);
+
srcu_read_unlock(&kvm->srcu, idx);
return 0;
@@ -544,9 +544,10 @@ void kvm_free_physmem(struct kvm *kvm)
{
int i;
struct kvm_memslots *slots = kvm->memslots;
+ struct kvm_memory_slot *memslot;
- for (i = 0; i < slots->nmemslots; ++i)
- kvm_free_physmem_slot(&slots->memslots[i], NULL);
+ kvm_for_each_memslot(slots, memslot, i)
+ kvm_free_physmem_slot(memslot, NULL);
kfree(kvm->memslots);
}
@@ -673,7 +674,7 @@ static void sort_memslots(struct kvm_memslots *slots)
int i, num = 0;
struct kvm_memory_slot *memslot;
- for (i = 0; i < slots->nmemslots; i++) {
+ for (i = 0; i < KVM_MEM_SLOTS_NUM; i++) {
memslot = &slots->memslots[i];
if (!memslot->npages)
continue;
@@ -685,10 +686,8 @@ static void sort_memslots(struct kvm_memslots *slots)
sort(slots->slots_sort, num, sizeof(memslot), cmp_memslot, NULL);
}
-void memslots_updated(struct kvm_memslots *slots, int slot_id)
+void memslots_updated(struct kvm_memslots *slots)
{
- if (slot_id >= slots->nmemslots)
- slots->nmemslots = slot_id + 1;
slots->generation++;
slots->slot_cache = NULL;
sort_memslots(slots);
@@ -840,7 +839,7 @@ skip_lpage:
goto out_free;
memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID;
- memslots_updated(slots, mem->slot);
+ memslots_updated(slots);
old_memslots = kvm->memslots;
rcu_assign_pointer(kvm->memslots, slots);
@@ -882,7 +881,7 @@ skip_lpage:
}
slots->memslots[mem->slot] = new;
- memslots_updated(slots, mem->slot);
+ memslots_updated(slots);
old_memslots = kvm->memslots;
rcu_assign_pointer(kvm->memslots, slots);
synchronize_srcu_expedited(&kvm->srcu);