@@ -30,6 +30,7 @@ struct AccelOpsClass {
 
     void (*create_vcpu_thread)(CPUState *cpu); /* MANDATORY NON-NULL */
     void (*kick_vcpu_thread)(CPUState *cpu);
+    bool (*cpu_thread_is_idle)(CPUState *cpu);
 
     void (*synchronize_post_reset)(CPUState *cpu);
     void (*synchronize_post_init)(CPUState *cpu);
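The new hook is deliberately optional: as the cpu_thread_is_idle() change further down shows, it is only consulted once the generic halted/has-work checks pass, and a NULL hook means "idle". As a rough sketch of how an accelerator opts in (not part of the patch; the foo_* names are invented for illustration, and QEMU's usual headers are assumed):

/* Hypothetical "foo" accelerator wiring up the optional hook. */
static bool foo_cpu_thread_is_idle(CPUState *cpu)
{
    /* No in-kernel halt handling here, so a halted vCPU thread is truly idle. */
    return true;
}

static void foo_accel_ops_class_init(ObjectClass *oc, void *data)
{
    AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);

    ops->create_vcpu_thread = foo_start_vcpu_thread;  /* mandatory, defined elsewhere */
    ops->cpu_thread_is_idle = foo_cpu_thread_is_idle; /* optional */
}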
@@ -74,11 +74,17 @@ static void kvm_start_vcpu_thread(CPUState *cpu)
                        cpu, QEMU_THREAD_JOINABLE);
 }
 
+static bool kvm_vcpu_thread_is_idle(CPUState *cpu)
+{
+    return !kvm_halt_in_kernel();
+}
+
 static void kvm_accel_ops_class_init(ObjectClass *oc, void *data)
 {
     AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
 
     ops->create_vcpu_thread = kvm_start_vcpu_thread;
+    ops->cpu_thread_is_idle = kvm_vcpu_thread_is_idle;
     ops->synchronize_post_reset = kvm_cpu_synchronize_post_reset;
     ops->synchronize_post_init = kvm_cpu_synchronize_post_init;
     ops->synchronize_state = kvm_cpu_synchronize_state;
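Behaviourally this is a no-op for KVM: the vCPU thread still only counts as idle when HLT is emulated in userspace, exactly what the open-coded kvm_halt_in_kernel() test in the generic helper used to enforce. Leaving aside the stopped-CPU early return, the combined predicate after the patch is roughly (an equivalence sketch, not code from the tree):

/* Equivalence sketch: generic checks plus the KVM hook, folded together. */
static bool kvm_thread_idle_combined(CPUState *cpu)
{
    return cpu->halted && !cpu_has_work(cpu) && !kvm_halt_in_kernel();
}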
@@ -90,10 +90,12 @@ bool cpu_thread_is_idle(CPUState *cpu)
     if (cpu_is_stopped(cpu)) {
         return true;
     }
-    if (!cpu->halted || cpu_has_work(cpu) ||
-        kvm_halt_in_kernel() || whpx_apic_in_platform()) {
+    if (!cpu->halted || cpu_has_work(cpu)) {
         return false;
     }
+    if (cpus_accel->cpu_thread_is_idle) {
+        return cpus_accel->cpu_thread_is_idle(cpu);
+    }
     return true;
 }
 
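Here cpus_accel is the AccelOpsClass registered by the active accelerator through cpus_register_accel(), so the NULL test is what keeps the new hook optional. The helper itself is what each vCPU thread polls before sleeping on its halt condition variable; roughly (a simplified sketch of the caller side, not the verbatim wait loop):

/* Simplified sketch of how a vCPU thread uses cpu_thread_is_idle(). */
static void wait_while_idle(CPUState *cpu)
{
    while (cpu_thread_is_idle(cpu)) {
        /* Sleep under the BQL until a kick or new work wakes this vCPU. */
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }
}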
@@ -83,12 +83,18 @@ static void whpx_kick_vcpu_thread(CPUState *cpu)
     }
 }
 
+static bool whpx_vcpu_thread_is_idle(CPUState *cpu)
+{
+    return !whpx_apic_in_platform();
+}
+
 static void whpx_accel_ops_class_init(ObjectClass *oc, void *data)
 {
     AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
 
     ops->create_vcpu_thread = whpx_start_vcpu_thread;
     ops->kick_vcpu_thread = whpx_kick_vcpu_thread;
+    ops->cpu_thread_is_idle = whpx_vcpu_thread_is_idle;
 
     ops->synchronize_post_reset = whpx_cpu_synchronize_post_reset;
     ops->synchronize_post_init = whpx_cpu_synchronize_post_init;
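Accelerators that do not set the hook (everything other than KVM and WHPX after this patch) fall through the NULL check in cpu_thread_is_idle() and keep the previous default: idle whenever the vCPU is halted with nothing pending. Spelled out (a sketch of that effective default, not code from the tree):

/* Sketch: effective predicate when ops->cpu_thread_is_idle is left NULL. */
static bool default_thread_is_idle(CPUState *cpu)
{
    if (cpu_is_stopped(cpu)) {
        return true;
    }
    return cpu->halted && !cpu_has_work(cpu);
}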