@@ -6105,13 +6105,20 @@ void kvm_arch_flush_shadow(struct kvm *kvm)
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
-        return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
+        if ((vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
                 !vcpu->arch.apf.halted)
-                || !list_empty_careful(&vcpu->async_pf.done)
                 || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
                 || vcpu->arch.nmi_pending ||
                 (kvm_arch_interrupt_allowed(vcpu) &&
-                 kvm_cpu_has_interrupt(vcpu));
+                 kvm_cpu_has_interrupt(vcpu)))
+                return 1;
+
+        if (!list_empty_careful(&vcpu->async_pf.done)) {
+                vcpu->arch.apf.halted = false;
+                return 2;
+        }
+
+        return 0;
 }
 
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
@@ -436,6 +436,12 @@ void kvm_arch_hardware_disable(void *garbage);
 int kvm_arch_hardware_setup(void);
 void kvm_arch_hardware_unsetup(void);
 void kvm_arch_check_processor_compat(void *rtn);
+
+/*
+ * Return value: > 0 if the vcpu is runnable, 0 if not.
+ * In particular, if the return value is 1, the caller should
+ * make a 'KVM_REQ_UNHALT' request.
+ */
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
 
 void kvm_free_physmem(struct kvm *kvm);
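
For illustration only, a condensed sketch of how a caller is expected to consume this tri-state result; example_halt_loop() is a hypothetical helper, not part of the patch, and the real consumer is the kvm_vcpu_block() change in the next hunk:

/*
 * Illustrative sketch only (not part of the patch): how a caller acts on
 * the return value documented above.
 *
 *   ret == 0  ->  not runnable, keep waiting
 *   ret == 1  ->  runnable, also request KVM_REQ_UNHALT
 *   ret  > 1  ->  an async page fault completed; wake the vcpu up
 *                 without unhalting the guest
 */
static void example_halt_loop(struct kvm_vcpu *vcpu)   /* hypothetical helper */
{
        for (;;) {
                int ret = kvm_arch_vcpu_runnable(vcpu);

                if (ret == 1) {
                        kvm_make_request(KVM_REQ_UNHALT, vcpu);
                        break;
                }
                if (ret > 1)
                        break;

                schedule();     /* still halted, let other tasks run */
        }
}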
@@ -1375,14 +1375,21 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
 void kvm_vcpu_block(struct kvm_vcpu *vcpu)
 {
         DEFINE_WAIT(wait);
+        int ret;
 
         for (;;) {
                 prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
 
-                if (kvm_arch_vcpu_runnable(vcpu)) {
+                ret = kvm_arch_vcpu_runnable(vcpu);
+
+                if (ret == 1) {
                         kvm_make_request(KVM_REQ_UNHALT, vcpu);
                         break;
                 }
+
+                if (ret > 1)
+                        break;
+
                 if (kvm_cpu_has_pending_timer(vcpu))
                         break;
                 if (signal_pending(current))