@@ -133,12 +133,10 @@ bool kvm_is_mmio_pfn(pfn_t pfn)
/*
* Switches to specified vcpu, until a matching vcpu_put()
*/
-int vcpu_load(struct kvm_vcpu *vcpu)
+static void __vcpu_load(struct kvm_vcpu *vcpu)
{
int cpu;

- if (mutex_lock_killable(&vcpu->mutex))
- return -EINTR;
if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) {
/* The thread running this VCPU changed. */
struct pid *oldpid = vcpu->pid;
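The first hunk removes the locking from the load path: __vcpu_load() now
assumes the caller already holds vcpu->mutex, and since nothing left in the
function can fail, it returns void instead of int. The patch does not assert
that contract, but the usual kernel idiom for making a "caller holds the
lock" rule checkable is a lockdep annotation; a defensive variant of the
helper might start like this (a sketch, not part of the change):

	static void __vcpu_load(struct kvm_vcpu *vcpu)
	{
		/* Not in the patch: document the new locking contract. */
		lockdep_assert_held(&vcpu->mutex);
		/* ... pid update and kvm_arch_vcpu_load() as in the hunk ... */
	}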
@@ -151,6 +149,14 @@ int vcpu_load(struct kvm_vcpu *vcpu)
preempt_notifier_register(&vcpu->preempt_notifier);
kvm_arch_vcpu_load(vcpu, cpu);
put_cpu();
+}
+
+int vcpu_load(struct kvm_vcpu *vcpu)
+{
+ if (mutex_lock_killable(&vcpu->mutex))
+ return -EINTR;
+
+ __vcpu_load(vcpu);
return 0;
}

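This hunk keeps vcpu_load()'s external contract intact: it still acquires
vcpu->mutex killably, still returns 0 or -EINTR, and the mutex is still
released by the matching vcpu_put() named in the comment above. Existing
callers therefore need no change; a hypothetical caller (the function name
here is made up) still follows the usual pattern:

	static int kvm_vcpu_do_something(struct kvm_vcpu *vcpu)
	{
		int r = vcpu_load(vcpu);	/* takes vcpu->mutex */

		if (r)
			return r;		/* -EINTR: fatal signal pending */
		/* ... operate on the loaded vcpu ... */
		vcpu_put(vcpu);			/* releases vcpu->mutex */
		return 0;
	}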
@@ -2197,10 +2203,21 @@ static long kvm_vcpu_ioctl(struct file *filp,
return kvm_arch_vcpu_ioctl(filp, ioctl, arg);
#endif

+ if (!mutex_trylock(&vcpu->mutex)) {
+	/*
+	 * Before a potentially long sleep on the mutex, check whether
+	 * the ioctl would fail anyway.  The common case is an uncontended
+	 * mutex, where this check is never reached and adds no overhead.
+	 */
+ if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
+ return -EINVAL;
+
+ if (mutex_lock_killable(&vcpu->mutex))
+ return -EINTR;
+ }
+
+ __vcpu_load(vcpu);
- r = vcpu_load(vcpu);
- if (r)
- return r;
switch (ioctl) {
case KVM_RUN:
r = -EINVAL;
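The last hunk is the motivation for the split: the ioctl path now tries the
mutex first, and only when it is contended does it pre-validate the ioctl
type, so a task never sleeps behind a long-running KVM_RUN just to be told
-EINVAL. Note that when the trylock succeeds, the _IOC_TYPE() check is
skipped entirely; an invalid ioctl still fails, just later, in the normal
ioctl dispatch. A minimal runnable userspace analogue of the pattern, with
hypothetical names and plain pthread_mutex_lock() standing in for
mutex_lock_killable():

	#include <errno.h>
	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	/* Stand-in for the _IOC_TYPE(ioctl) != KVMIO test. */
	static int looks_valid(int req)
	{
		return req >= 0;
	}

	static int handle_request(int req)
	{
		if (pthread_mutex_trylock(&lock) != 0) {
			/* Contended: fail fast instead of sleeping. */
			if (!looks_valid(req))
				return -EINVAL;
			pthread_mutex_lock(&lock);
		}

		/* Locked work; invalid requests are still caught here. */
		int r = looks_valid(req) ? 0 : -EINVAL;

		pthread_mutex_unlock(&lock);
		return r;
	}

	int main(void)
	{
		printf("%d\n", handle_request(1));	/* 0 */
		printf("%d\n", handle_request(-1));	/* -EINVAL */
		return 0;
	}

Build with cc -pthread. The demo is single-threaded, so the trylock always
succeeds and the slow path never runs; it is exercised only when another
thread holds the lock, which is exactly the case the patch optimizes in
kvm_vcpu_ioctl().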