@@ -818,7 +818,8 @@ bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work);
-void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
+/* Return true if we can handle more completed apfs, false otherwise. */
+bool kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work);
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work);
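
The hunk above changes the kvm_arch_async_page_present() declaration, presumably in include/linux/kvm_host.h. Returning bool lets the generic completion code, reworked further below, ask the architecture whether another completed async PF can be delivered right away instead of assuming that it always can.
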
@@ -6265,7 +6265,7 @@ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
}
}

-void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
+bool kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work)
{
trace_kvm_async_pf_ready(work->arch.token, work->gva);
@@ -6274,13 +6274,17 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
else
kvm_del_async_pf_gfn(vcpu, work->arch.gfn);

+ vcpu->arch.apf.halted = false;
+
if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
!apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
vcpu->arch.fault.error_code = 0;
vcpu->arch.fault.address = work->arch.token;
kvm_inject_page_fault(vcpu);
+ return false;
}
- vcpu->arch.apf.halted = false;
+
+ return true;
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
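
On the x86 side, kvm_arch_async_page_present() now reports whether the caller may consume further completions: when the guest has async PF enabled and the PAGE_READY reason is written successfully, a page fault carrying the token is injected and the function returns false, because only one such notification can be pending in the guest at a time. When nothing is injected it returns true, so the caller can go on to the next completed request. Note that apf.halted is cleared before the injection path rather than after it, so a vcpu halted on an async PF is woken regardless of which way the function returns.
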
@@ -123,25 +123,29 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
{
struct kvm_async_pf *work;
+ bool ret;

if (list_empty_careful(&vcpu->async_pf.done) ||
!kvm_arch_can_inject_async_page_present(vcpu))
return;

- spin_lock(&vcpu->async_pf.lock);
- work = list_first_entry(&vcpu->async_pf.done, typeof(*work), link);
- list_del(&work->link);
- spin_unlock(&vcpu->async_pf.lock);
+ do {
+ spin_lock(&vcpu->async_pf.lock);
+ work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
+ link);
+ list_del(&work->link);
+ spin_unlock(&vcpu->async_pf.lock);

- if (work->page)
- kvm_arch_async_page_ready(vcpu, work);
- kvm_arch_async_page_present(vcpu, work);
+ if (work->page)
+ kvm_arch_async_page_ready(vcpu, work);
+ ret = kvm_arch_async_page_present(vcpu, work);

- list_del(&work->queue);
- vcpu->async_pf.queued--;
- if (work->page)
- put_page(work->page);
- kmem_cache_free(async_pf_cache, work);
+ list_del(&work->queue);
+ vcpu->async_pf.queued--;
+ if (work->page)
+ put_page(work->page);
+ kmem_cache_free(async_pf_cache, work);
+ } while (ret && !list_empty_careful(&vcpu->async_pf.done));
}

int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
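
To make the new control flow in kvm_check_async_pf_completion() easier to follow, here is a minimal self-contained user-space sketch of the drain loop. It is illustration only, not kernel code: struct work, deliver(), queue_done() and check_completion() are invented stand-ins, deliver() plays the role of kvm_arch_async_page_present() with the same bool contract, and locking is omitted.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct kvm_async_pf on the vcpu's done list. */
struct work {
	int token;
	bool needs_injection;	/* completing this one injects a fault */
	struct work *next;
};

static struct work *done_list;	/* models vcpu->async_pf.done */

/*
 * Models kvm_arch_async_page_present(): returns false once a fault has
 * been injected, because only one notification can be pending at a time.
 */
static bool deliver(struct work *w)
{
	printf("completing token %d\n", w->token);
	if (w->needs_injection) {
		printf("  injected PAGE_READY, stop draining\n");
		return false;
	}
	return true;	/* nothing injected, safe to handle the next one */
}

/* Models the reworked kvm_check_async_pf_completion() drain loop. */
static void check_completion(void)
{
	bool ret;

	if (!done_list)
		return;

	do {
		struct work *w = done_list;

		done_list = w->next;
		ret = deliver(w);
		free(w);	/* the delivered item is always consumed */
	} while (ret && done_list);
}

static void queue_done(int token, bool needs_injection)
{
	struct work *w = malloc(sizeof(*w));

	w->token = token;
	w->needs_injection = needs_injection;
	w->next = done_list;	/* push at head: last queued runs first */
	done_list = w;
}

int main(void)
{
	queue_done(3, false);
	queue_done(2, true);
	queue_done(1, false);

	check_completion();	/* completes 1, injects for 2, leaves 3 queued */
	check_completion();	/* a later call drains the remaining work */
	return 0;
}

The point the sketch makes is that the item whose delivery injects a fault is still consumed and freed; only the items behind it stay queued until a later invocation, once kvm_arch_can_inject_async_page_present() allows another event.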