@@ -5321,8 +5321,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
vcpu->run->exit_reason = KVM_EXIT_INTR;
++vcpu->stat.request_irq_exits;
}
-
- kvm_check_async_pf_completion(vcpu);
 
if (signal_pending(current)) {
r = -EINTR;
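This first hunk, in __vcpu_run(), is the behavioural core of the patch: completed async page faults are no longer injected on every pass through the vcpu run loop. As far as this series shows, the only remaining consumer of the completion queue is the blocking path patched in the final hunk. A sketch of the resulting flow, assuming the usual async-PF worker fills vcpu->async_pf.done:

    /*
     * Flow after this patch (sketch, not a literal excerpt):
     *
     *   async-PF worker finishes the fault
     *     -> work item queued on vcpu->async_pf.done
     *   halted vcpu waits in kvm_vcpu_block()
     *     -> kvm_check_async_pf_completion() dequeues the item,
     *        injects the "page present" event and returns true
     *     -> KVM_REQ_UNHALT is set and the guest resumes
     */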
@@ -6108,7 +6106,6 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
!vcpu->arch.apf.halted)
- || !list_empty_careful(&vcpu->async_pf.done)
|| vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
|| vcpu->arch.nmi_pending ||
(kvm_arch_interrupt_allowed(vcpu) &&
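Dropping the list_empty_careful() term from kvm_arch_vcpu_runnable() is only safe because kvm_vcpu_block() now polls the completion queue itself (final hunk); a halted vcpu with a finished async PF is woken by that explicit check rather than by the generic runnable test. Reconstructed from the context lines, the predicate now reads roughly as below; the closing kvm_cpu_has_interrupt() term is assumed from the surrounding file, not shown in the hunk:

    int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
    {
        return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
                !vcpu->arch.apf.halted)
                || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
                || vcpu->arch.nmi_pending
                || (kvm_arch_interrupt_allowed(vcpu) &&
                    kvm_cpu_has_interrupt(vcpu));   /* assumed tail */
    }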
@@ -90,7 +90,6 @@ struct kvm_async_pf {
};
 
void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
-void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
@@ -120,13 +120,13 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
vcpu->async_pf.queued = 0;
}
 
-void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
+bool kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
{
struct kvm_async_pf *work;
 
if (list_empty_careful(&vcpu->async_pf.done) ||
!kvm_arch_can_inject_async_page_present(vcpu))
- return;
+ return false;
 
spin_lock(&vcpu->async_pf.lock);
work = list_first_entry(&vcpu->async_pf.done, typeof(*work), link);
@@ -142,6 +142,8 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
if (work->page)
put_page(work->page);
kmem_cache_free(async_pf_cache, work);
+
+ return true;
}
 
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
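Pieced together from the two hunks above, the converted helper reads as follows. The middle of the function is untouched by the patch and elided here (the elision is mine, not part of the diff):

    bool kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
    {
        struct kvm_async_pf *work;

        if (list_empty_careful(&vcpu->async_pf.done) ||
            !kvm_arch_can_inject_async_page_present(vcpu))
            return false;

        spin_lock(&vcpu->async_pf.lock);
        work = list_first_entry(&vcpu->async_pf.done, typeof(*work), link);
        /* ... unchanged middle: unlink the item, drop the lock,
           inject the "page present" event for this work ... */

        if (work->page)
            put_page(work->page);
        kmem_cache_free(async_pf_cache, work);

        return true;
    }

The bool return lets the blocking path distinguish "a completion was just injected, so unhalt" from "nothing pending, keep sleeping".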
@@ -27,10 +27,16 @@
int kvm_async_pf_init(void);
void kvm_async_pf_deinit(void);
void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu);
+bool kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
+
#else
#define kvm_async_pf_init() (0)
#define kvm_async_pf_deinit() do{}while(0)
#define kvm_async_pf_vcpu_init(C) do{}while(0)
+static inline bool kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
+{
+ return false;
+}
#endif
 
#endif
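The !CONFIG_KVM_ASYNC_PF stub is a static inline returning false rather than a statement-style macro like its neighbours, so the new call in kvm_vcpu_block() still type-checks its vcpu argument and folds away entirely. With async PF compiled out, the condition in the final hunk effectively reduces to the old test (illustration only):

    if (kvm_arch_vcpu_runnable(vcpu) /* || false */) {
        kvm_make_request(KVM_REQ_UNHALT, vcpu);
        break;
    }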
@@ -1347,7 +1347,8 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
for (;;) {
prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
 
- if (kvm_arch_vcpu_runnable(vcpu)) {
+ if (kvm_arch_vcpu_runnable(vcpu) ||
+ kvm_check_async_pf_completion(vcpu)) {
kvm_make_request(KVM_REQ_UNHALT, vcpu);
break;
}
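Two points worth noting about this last hunk. First, kvm_check_async_pf_completion() is not a pure predicate: when it returns true it has already dequeued and injected the completion, so KVM_REQ_UNHALT is set only after the "page present" event is delivered. Second, the check runs after prepare_to_wait(), like the runnable test, which avoids the classic lost-wakeup race; a sketch, assuming the async-PF worker wakes vcpu->wq after queueing its work item:

    /*
     *   vcpu thread                            worker
     *   -----------                            ------
     *   prepare_to_wait(&vcpu->wq, ...)
     *   kvm_check_async_pf_completion()
     *     -> false, queue still empty
     *                                          add work to ...async_pf.done
     *                                          wake_up(&vcpu->wq)
     *   schedule()
     *     -> returns at once: the wakeup was
     *        recorded against the queued waiter
     */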