@@ -105,6 +105,8 @@ static int kvm_sstep_flags;
 static bool kvm_immediate_exit;
 static hwaddr kvm_max_slot_size = ~0;
 
+QemuLockCnt kvm_in_ioctl_lock;
+
 static const KVMCapabilityInfo kvm_required_capabilites[] = {
     KVM_CAP_INFO(USER_MEMORY),
     KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
@@ -2310,6 +2312,7 @@ static int kvm_init(MachineState *ms)
     assert(TARGET_PAGE_SIZE <= qemu_real_host_page_size());
 
     s->sigmask_len = 8;
+    qemu_lockcnt_init(&kvm_in_ioctl_lock);
 
 #ifdef KVM_CAP_SET_GUEST_DEBUG
     QTAILQ_INIT(&s->kvm_sw_breakpoints);
@@ -2808,6 +2811,18 @@ static void kvm_eat_signals(CPUState *cpu)
     } while (sigismember(&chkset, SIG_IPI));
 }
 
+static void kvm_set_in_ioctl(bool in_ioctl)
+{
+    if (likely(qemu_mutex_iothread_locked())) {
+        return;
+    }
+    if (in_ioctl) {
+        qemu_lockcnt_inc(&kvm_in_ioctl_lock);
+    } else {
+        qemu_lockcnt_dec(&kvm_in_ioctl_lock);
+    }
+}
+
 int kvm_cpu_exec(CPUState *cpu)
 {
     struct kvm_run *run = cpu->kvm_run;
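
The helper above only counts ioctls issued while the BQL is *not* held; BQL holders are already serialized against anything else running under the BQL. A thread that itself holds the BQL can therefore treat a zero count as "no unlocked ioctl in flight". A minimal, hypothetical sketch of such a waiter follows (the helper name, the polling loop, and g_usleep() are illustrative; they are not taken from this patch):

/*
 * Illustrative only, not part of this patch.  Assumes "qemu/thread.h"
 * (QemuLockCnt) and glib are available, as they are in kvm-all.c.
 * This naive version ignores the race where a new unlocked ioctl starts
 * right after the check returns; see the note after the last hunk for
 * how holding the lockcnt's lock closes that window.
 */
static void kvm_wait_for_unlocked_ioctls(void)
{
    while (qemu_lockcnt_count(&kvm_in_ioctl_lock) != 0) {
        g_usleep(100);
    }
}
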
@@ -3014,7 +3029,9 @@ int kvm_vm_ioctl(KVMState *s, int type, ...)
     va_end(ap);
 
     trace_kvm_vm_ioctl(type, arg);
+    kvm_set_in_ioctl(true);
     ret = ioctl(s->vmfd, type, arg);
+    kvm_set_in_ioctl(false);
     if (ret == -1) {
         ret = -errno;
     }
@@ -3050,7 +3067,9 @@ int kvm_device_ioctl(int fd, int type, ...)
     va_end(ap);
 
     trace_kvm_device_ioctl(fd, type, arg);
+    kvm_set_in_ioctl(true);
     ret = ioctl(fd, type, arg);
+    kvm_set_in_ioctl(false);
     if (ret == -1) {
         ret = -errno;
     }
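
Using a QemuLockCnt rather than a bare atomic counter is what makes the count usable by an updater: qemu_lockcnt_inc() cannot take the count from zero to nonzero while another thread holds the lockcnt's lock, so once the count drains to zero under that lock it stays zero until the lock is released. A rough sketch of that updater-side pattern, under the assumption that this is how the rest of the series consumes the counter (the function name and the poll/kick strategy are hypothetical):

/*
 * Hypothetical updater-side pattern, illustrative only.  While the lock
 * is held, qemu_lockcnt_dec() still proceeds, so the count can drain;
 * once it reaches zero, no new out-of-BQL ioctl can start until the
 * lock is released, so update() runs with none in flight.
 */
static void kvm_run_exclusive_update(void (*update)(void *opaque), void *opaque)
{
    qemu_lockcnt_lock(&kvm_in_ioctl_lock);
    while (qemu_lockcnt_count(&kvm_in_ioctl_lock) != 0) {
        /* A real implementation would kick vCPUs out of KVM_RUN here. */
        g_usleep(100);
    }
    update(opaque);
    qemu_lockcnt_unlock(&kvm_in_ioctl_lock);
}
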