diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -160,7 +160,7 @@ int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
return 0;
}
-void kvm_mips_free_vcpus(struct kvm *kvm)
+void kvm_arch_free_vcpus(struct kvm *kvm)
{
unsigned int i;
struct kvm_vcpu *vcpu;
@@ -168,15 +168,6 @@ void kvm_mips_free_vcpus(struct kvm *kvm)
kvm_for_each_vcpu(i, vcpu, kvm) {
kvm_arch_vcpu_free(vcpu);
}
-
- mutex_lock(&kvm->lock);
-
- for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
- kvm->vcpus[i] = NULL;
-
- atomic_set(&kvm->online_vcpus, 0);
-
- mutex_unlock(&kvm->lock);
}
static void kvm_mips_free_gpa_pt(struct kvm *kvm)
@@ -188,7 +179,7 @@ static void kvm_mips_free_gpa_pt(struct kvm *kvm)
void kvm_arch_destroy_vm(struct kvm *kvm)
{
- kvm_mips_free_vcpus(kvm);
+ kvm_free_vcpus(kvm);
kvm_mips_free_gpa_pt(kvm);
}
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -456,11 +456,17 @@ int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
return 0;
}
-void kvm_arch_destroy_vm(struct kvm *kvm)
+void kvm_arch_free_vcpus(struct kvm *kvm)
{
- unsigned int i;
+ int i;
struct kvm_vcpu *vcpu;
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_arch_vcpu_free(vcpu);
+}
+
+void kvm_arch_destroy_vm(struct kvm *kvm)
+{
#ifdef CONFIG_KVM_XICS
/*
* We call kick_all_cpus_sync() to ensure that all
@@ -471,14 +477,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kick_all_cpus_sync();
#endif
- kvm_for_each_vcpu(i, vcpu, kvm)
- kvm_arch_vcpu_free(vcpu);
+ kvm_free_vcpus(kvm);
mutex_lock(&kvm->lock);
- for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
- kvm->vcpus[i] = NULL;
-
- atomic_set(&kvm->online_vcpus, 0);
kvmppc_core_destroy_vm(kvm);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1941,20 +1941,13 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
kmem_cache_free(kvm_vcpu_cache, vcpu);
}
-static void kvm_free_vcpus(struct kvm *kvm)
+void kvm_arch_free_vcpus(struct kvm *kvm)
{
unsigned int i;
struct kvm_vcpu *vcpu;
kvm_for_each_vcpu(i, vcpu, kvm)
kvm_arch_vcpu_destroy(vcpu);
-
- mutex_lock(&kvm->lock);
- for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
- kvm->vcpus[i] = NULL;
-
- atomic_set(&kvm->online_vcpus, 0);
- mutex_unlock(&kvm->lock);
}
void kvm_arch_destroy_vm(struct kvm *kvm)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8077,7 +8077,7 @@ static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
vcpu_put(vcpu);
}
-static void kvm_free_vcpus(struct kvm *kvm)
+void kvm_arch_free_vcpus(struct kvm *kvm)
{
unsigned int i;
struct kvm_vcpu *vcpu;
@@ -8091,13 +8091,6 @@ static void kvm_free_vcpus(struct kvm *kvm)
}
kvm_for_each_vcpu(i, vcpu, kvm)
kvm_arch_vcpu_free(vcpu);
-
- mutex_lock(&kvm->lock);
- for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
- kvm->vcpus[i] = NULL;
-
- atomic_set(&kvm->online_vcpus, 0);
- mutex_unlock(&kvm->lock);
}
void kvm_arch_sync_events(struct kvm *kvm)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -874,6 +874,8 @@ static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);
+void kvm_free_vcpus(struct kvm *kvm);
+void kvm_arch_free_vcpus(struct kvm *kvm);
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -166,6 +166,14 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
+void kvm_arch_free_vcpus(struct kvm *kvm)
+{
+ int i;
+ struct kvm_vcpu *vcpu;
+
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_arch_vcpu_free(vcpu);
+}
/**
* kvm_arch_destroy_vm - destroy the VM data structure
@@ -173,17 +181,10 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
*/
void kvm_arch_destroy_vm(struct kvm *kvm)
{
- int i;
-
free_percpu(kvm->arch.last_vcpu_ran);
kvm->arch.last_vcpu_ran = NULL;
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- if (kvm->vcpus[i]) {
- kvm_arch_vcpu_free(kvm->vcpus[i]);
- kvm->vcpus[i] = NULL;
- }
- }
+ kvm_free_vcpus(kvm);
kvm_vgic_destroy(kvm);
}
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -750,6 +750,23 @@ static void kvm_destroy_devices(struct kvm *kvm)
}
}
+void kvm_free_vcpus(struct kvm *kvm)
+{
+ int i;
+
+ kvm_arch_free_vcpus(kvm);
+
+ mutex_lock(&kvm->lock);
+
+ i = atomic_read(&kvm->online_vcpus);
+ atomic_set(&kvm->online_vcpus, 0);
+
+ while (i--)
+ kvm->vcpus[i] = NULL;
+
+ mutex_unlock(&kvm->lock);
+}
+
static void kvm_destroy_vm(struct kvm *kvm)
{
int i;

Generalize clearing of kvm->vcpus.

This should not be needed at all, as all accesses to VCPUs in the
destruction path are bugs, but maybe it helps to catch them.  The call
path crosses arch/common code way too much, so an extra untangling
patch is welcome.

Doing the clearing later seems to be OK.  I don't see a reason for
locking the mutex, so there is definitely room for improvement.

Suggested-by: Cornelia Huck <cohuck@redhat.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
---
 arch/mips/kvm/mips.c       | 13 ++-----------
 arch/powerpc/kvm/powerpc.c | 17 +++++++++--------
 arch/s390/kvm/kvm-s390.c   |  9 +--------
 arch/x86/kvm/x86.c         |  9 +--------
 include/linux/kvm_host.h   |  2 ++
 virt/kvm/arm/arm.c         | 17 +++++++++--------
 virt/kvm/kvm_main.c        | 17 +++++++++++++++++
 7 files changed, 41 insertions(+), 43 deletions(-)
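
For reviewers, here is a small compilable userspace sketch of the call flow
this patch establishes: kvm_arch_destroy_vm() delegates to the common
kvm_free_vcpus(), which runs the per-arch kvm_arch_free_vcpus() and only then
clears kvm->vcpus.  The struct kvm/kvm_vcpu definitions, the pthread mutex,
the C11 atomics, and main() are simplified stand-ins for kernel
infrastructure, not part of the patch, and the plain index loop only
approximates kvm_for_each_vcpu().

        /*
         * Minimal userspace model of the post-patch teardown flow.  All
         * types are simplified stand-ins: kvm->lock becomes a pthread
         * mutex and online_vcpus a C11 atomic.
         */
        #include <pthread.h>
        #include <stdatomic.h>
        #include <stdio.h>
        #include <stdlib.h>

        #define KVM_MAX_VCPUS 8

        struct kvm_vcpu { int id; };

        struct kvm {
                pthread_mutex_t lock;
                atomic_int online_vcpus;
                struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
        };

        /* Stand-in for the per-arch VCPU destructor. */
        static void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
        {
                printf("freeing vcpu %d\n", vcpu->id);
                free(vcpu);
        }

        /* Arch hook, mirroring e.g. the s390/x86 versions above. */
        void kvm_arch_free_vcpus(struct kvm *kvm)
        {
                int i;

                for (i = 0; i < atomic_load(&kvm->online_vcpus); i++)
                        kvm_arch_vcpu_free(kvm->vcpus[i]);
        }

        /* Common helper: arch teardown first, then clear kvm->vcpus. */
        void kvm_free_vcpus(struct kvm *kvm)
        {
                int i;

                kvm_arch_free_vcpus(kvm);

                pthread_mutex_lock(&kvm->lock);

                i = atomic_load(&kvm->online_vcpus);
                atomic_store(&kvm->online_vcpus, 0);

                while (i--)
                        kvm->vcpus[i] = NULL;   /* catch stale accesses */

                pthread_mutex_unlock(&kvm->lock);
        }

        int main(void)
        {
                struct kvm kvm = { .lock = PTHREAD_MUTEX_INITIALIZER };
                int i;

                for (i = 0; i < 3; i++) {
                        kvm.vcpus[i] = calloc(1, sizeof(*kvm.vcpus[i]));
                        kvm.vcpus[i]->id = i;
                        atomic_fetch_add(&kvm.online_vcpus, 1);
                }
                kvm_free_vcpus(&kvm);   /* what kvm_arch_destroy_vm calls */
                return 0;
        }

The clearing runs only after the arch hook because the arch code still needs
the pointers in order to free them; once the slots are NULLed, any stale
VCPU access later in the common teardown path trips over a NULL pointer
rather than a dangling one, which is the debugging aid the log mentions.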