@@ -822,6 +822,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
 				   struct kvm_async_pf *work);
 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
 			       struct kvm_async_pf *work);
+void kvm_arch_async_pf_completion(struct kvm_vcpu *vcpu);
 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
 extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 
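The surrounding declarations (kvm_arch_can_inject_async_page_present(), kvm_find_async_pf_gfn()) suggest this header is x86-specific, so only x86 needs to define the new hook for as long as x86 is the only architecture selecting CONFIG_KVM_ASYNC_PF. Any other architecture enabling async page faults would presumably have to supply its own definition; a minimal no-op sketch (hypothetical, not part of the patch):

	void kvm_arch_async_pf_completion(struct kvm_vcpu *vcpu)
	{
		/* Nothing to do: this arch never halts a vcpu on an async fault. */
	}
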
@@ -6280,6 +6280,11 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
 	}
 }
 
+void kvm_arch_async_pf_completion(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.apf.halted = false;
+}
+
 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
 {
 	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
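The x86 hook simply clears vcpu->arch.apf.halted. The flag presumably exists so that kvm_arch_vcpu_runnable() keeps an apf-halted vcpu off the runnable path until a faulted page has been brought in; a sketch of the assumed interaction (the real check also considers mp_state, pending interrupts, and so on, which are not shown here):

	/* Assumed shape of the x86 runnability check once apf.halted exists. */
	int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
	{
		return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
		       !vcpu->arch.apf.halted;
	}
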
@@ -142,6 +142,7 @@ bool kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
 	if (work->page)
 		put_page(work->page);
 	kmem_cache_free(async_pf_cache, work);
+	kvm_arch_async_pf_completion(vcpu);
 	return true;
 }
 
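Ordering matters in this hunk: the work item is freed before the new hook runs, which is presumably why the hook takes only the vcpu. A sketch of the resulting completion path; pick_completed_work() is a hypothetical stand-in for the dequeueing logic that this hunk does not show:

	bool kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
	{
		/* Hypothetical helper: dequeue one completed work item, if any. */
		struct kvm_async_pf *work = pick_completed_work(vcpu);

		if (!work)
			return false;
		kvm_arch_async_page_present(vcpu, work); /* tell the guest its page is ready */
		if (work->page)
			put_page(work->page);
		kmem_cache_free(async_pf_cache, work);	/* work must not be used past this point */
		kvm_arch_async_pf_completion(vcpu);	/* hence the vcpu-only argument */
		return true;
	}
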
@@ -1347,11 +1347,14 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
 	for (;;) {
 		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
 
-		if (kvm_arch_vcpu_runnable(vcpu) ||
-		    kvm_check_async_pf_completion(vcpu)) {
+		if (kvm_arch_vcpu_runnable(vcpu)) {
 			kvm_make_request(KVM_REQ_UNHALT, vcpu);
 			break;
 		}
+
+		if (kvm_check_async_pf_completion(vcpu))
+			break;
+
 		if (kvm_cpu_has_pending_timer(vcpu))
 			break;
 		if (signal_pending(current))
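
The behavioural change in kvm_vcpu_block(): an async-pf completion still breaks out of the wait loop so it can be delivered, but it no longer requests KVM_REQ_UNHALT on its own. The guest only leaves its halt when kvm_arch_vcpu_runnable() agrees, for example after the x86 hook above has cleared apf.halted. A condensed, commented view of the resulting loop (illustrative only; the schedule() tail is assumed from the usual shape of this loop and is not part of the quoted hunk):

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		if (kvm_arch_vcpu_runnable(vcpu)) {
			/* Architecturally runnable: end the guest's halt. */
			kvm_make_request(KVM_REQ_UNHALT, vcpu);
			break;
		}
		if (kvm_check_async_pf_completion(vcpu))
			break;	/* deliver the completion without unhalting the guest */
		if (kvm_cpu_has_pending_timer(vcpu))
			break;
		if (signal_pending(current))
			break;

		schedule();
	}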