@@ -247,7 +247,7 @@ int kvm_init(void *opaque, unsigned vcpu
struct module *module);
void kvm_exit(void);
-void kvm_get_kvm(struct kvm *kvm);
+int kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
@@ -496,9 +496,30 @@ static void kvm_destroy_vm(struct kvm *k
mmdrop(mm);
}
-void kvm_get_kvm(struct kvm *kvm)
+/*
+ * Once ->users_count drops to 0, the kvm
+ * object is destroyed. Refuse to hand out
+ * any new references once that happens.
+ *
+ * Callers that hold an open kvm fd could
+ * safely use a plain atomic_inc(), since
+ * ->users_count is set to 1 when the fd is
+ * created and stays at least 1 for as long
+ * as the fd is open.
+ *
+ * But those callers are currently rare, so
+ * keep a single function and pay for the
+ * (more expensive) atomic_add_unless().
+ *
+ * Returns 0 if the reference was obtained
+ * successfully, -EBUSY otherwise.
+ */
+int kvm_get_kvm(struct kvm *kvm)
{
- atomic_inc(&kvm->users_count);
+	if (!atomic_add_unless(&kvm->users_count, 1, 0))
+		return -EBUSY;
+
+	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);
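
The increment-unless-zero idiom above is the same trick kref_get_unless_zero() uses: once the count has hit zero the object is already on its way to destruction, and handing out a fresh reference would let a caller "resurrect" freed memory. As a rough illustration only, here is a minimal userspace sketch of the pattern in C11 atomics; users_count and get_unless_zero() are stand-ins invented for this sketch, and the kernel's atomic_add_unless() is conceptually the same compare-and-swap loop.

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for kvm->users_count: 1 ref taken when the "fd" is created. */
static atomic_int users_count = 1;

/* Mimics atomic_add_unless(v, 1, 0): increment v unless it is 0. */
static int get_unless_zero(atomic_int *v)
{
	int old = atomic_load(v);

	while (old != 0) {
		/* On CAS failure, 'old' is reloaded and the loop retries. */
		if (atomic_compare_exchange_weak(v, &old, old + 1))
			return 1;	/* got a reference */
	}
	return 0;			/* count already hit zero: refuse */
}

int main(void)
{
	if (get_unless_zero(&users_count))
		printf("ref taken, count=%d\n", atomic_load(&users_count));

	/* Simulate the last reference being dropped... */
	atomic_store(&users_count, 0);

	/* ...after which no new reference may be handed out. */
	if (!get_unless_zero(&users_count))
		printf("refused: object is being destroyed\n");

	return 0;
}
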
@@ -1332,7 +1353,12 @@ static int kvm_vm_ioctl_create_vcpu(stru
BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
/* Now it's all set up, let userspace reach it */
- kvm_get_kvm(kvm);
+ r = kvm_get_kvm(kvm);
+	/*
+	 * This is reached via an ioctl on the kvm fd, which
+	 * itself holds a reference, so this should never fail.
+	 */
+ WARN_ON(r);
r = create_vcpu_fd(vcpu);
if (r < 0) {
kvm_put_kvm(kvm);
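
With the void -> int change, any future caller that is not already protected by an open kvm fd has to handle failure rather than just warn about it. A hypothetical out-of-fd-path user (example_use_vm() is illustrative only, not part of this patch, and assumes linux/kvm_host.h for struct kvm) might look like:

/* Hypothetical caller with no kvm fd reference backing it up. */
static int example_use_vm(struct kvm *kvm)
{
	int r;

	r = kvm_get_kvm(kvm);
	if (r)
		return r;	/* -EBUSY: VM is being torn down, back off */

	/* ... users_count now pins the VM; safe to use it here ... */

	kvm_put_kvm(kvm);
	return 0;
}
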