@@ -131,6 +131,7 @@ struct kvm_vcpu {
 		struct list_head queue;
 		struct list_head done;
 		spinlock_t lock;
+		bool wakeup;
 	} async_pf;
 #endif
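The new flag sits with the rest of the per-vCPU async-PF state under CONFIG_KVM_ASYNC_PF. A plain bool without locking should be enough here, assuming both its writer (kvm_async_pf_wakeup_all(), changed below) and its reader (kvm_check_async_pf_completion()) run on the vCPU thread; if either could run from another context, the flag would have to be accessed under async_pf.lock or made atomic.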
@@ -124,6 +124,11 @@ bool kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
 {
 	struct kvm_async_pf *work;
 
+	if (vcpu->async_pf.wakeup) {
+		vcpu->async_pf.wakeup = false;
+		return true;
+	}
+
 	if (list_empty_careful(&vcpu->async_pf.done) ||
 	    !kvm_arch_can_inject_async_page_present(vcpu))
 		return false;
@@ -197,20 +202,6 @@ retry_sync:
 
 int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
 {
-	struct kvm_async_pf *work;
-
-	if (!list_empty(&vcpu->async_pf.done))
-		return 0;
-
-	work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
-	if (!work)
-		return -ENOMEM;
-
-	work->page = bad_page;
-	get_page(bad_page);
-	INIT_LIST_HEAD(&work->queue); /* for list_del to work */
-
-	list_add_tail(&work->link, &vcpu->async_pf.done);
-	vcpu->async_pf.queued++;
+	vcpu->async_pf.wakeup = true;
 	return 0;
 }
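The rewrite also removes the function's only failure path: the old code could fail with -ENOMEM when the GFP_ATOMIC allocation of the dummy bad_page work item did not succeed, whereas setting a flag always succeeds (the int return type and return 0 are kept for existing callers). Below is a minimal stand-alone sketch of the resulting control flow; async_pf_state, done_count, wakeup_all() and check_completion() are hypothetical stand-ins for the patched KVM functions, not kernel code:

/* wakeup_flag_model.c - hypothetical user-space model of the patched logic */
#include <stdbool.h>
#include <stdio.h>

struct async_pf_state {
	bool wakeup;		/* the flag added by this patch */
	int done_count;		/* stand-in for the async_pf.done list */
};

/* models kvm_async_pf_wakeup_all(): no allocation, cannot fail */
static int wakeup_all(struct async_pf_state *s)
{
	s->wakeup = true;
	return 0;
}

/* models kvm_check_async_pf_completion(): the flag is consumed first,
 * before the done list is even looked at */
static bool check_completion(struct async_pf_state *s)
{
	if (s->wakeup) {
		s->wakeup = false;
		return true;
	}
	return s->done_count > 0;
}

int main(void)
{
	struct async_pf_state s = { .wakeup = false, .done_count = 0 };

	wakeup_all(&s);
	printf("%d\n", check_completion(&s));	/* 1: broadcast reported once */
	printf("%d\n", check_completion(&s));	/* 0: flag cleared, no real work */
	return 0;
}

In this model the broadcast fires exactly once and leaves the done list untouched, which is the behaviour the old dummy bad_page work item achieved by other means.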