@@ -78,7 +78,6 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 #ifdef CONFIG_KVM_ASYNC_PF
 struct kvm_async_pf {
 	struct work_struct work;
-	struct list_head link;
 	struct list_head queue;
 	struct kvm_vcpu *vcpu;
 	struct mm_struct *mm;
@@ -127,10 +126,10 @@ struct kvm_vcpu {
 
 #ifdef CONFIG_KVM_ASYNC_PF
 	struct {
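+		/* count of completed works still sitting on &queue */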
+		atomic_t done;
 		u32 queued;
 		struct list_head queue;
-		struct list_head done;
-		spinlock_t lock;
 		bool wakeup;
 	} async_pf;
 #endif
@@ -49,9 +49,7 @@ void kvm_async_pf_deinit(void)
 
 void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
 {
-	INIT_LIST_HEAD(&vcpu->async_pf.done);
 	INIT_LIST_HEAD(&vcpu->async_pf.queue);
-	spin_lock_init(&vcpu->async_pf.lock);
 }
 
 static void async_pf_execute(struct work_struct *work)
@@ -72,11 +70,10 @@ static void async_pf_execute(struct work_struct *work)
 	up_read(&mm->mmap_sem);
 	unuse_mm(mm);
 
-	spin_lock(&vcpu->async_pf.lock);
-	list_add_tail(&apf->link, &vcpu->async_pf.done);
 	apf->page = page;
 	apf->done = true;
-	spin_unlock(&vcpu->async_pf.lock);
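+	/* publish the completion; kvm_check_async_pf_completion() polls this */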
+	atomic_inc(&vcpu->async_pf.done);
 
 	/*
 	 * apf may be freed by kvm_check_async_pf_completion() after
@@ -101,52 +97,50 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
 			   typeof(*work), queue);
 		cancel_work_sync(&work->work);
 		list_del(&work->queue);
-		if (!work->done) /* work was canceled */
-			kmem_cache_free(async_pf_cache, work);
-	}
-
-	spin_lock(&vcpu->async_pf.lock);
-	while (!list_empty(&vcpu->async_pf.done)) {
-		struct kvm_async_pf *work =
-			list_entry(vcpu->async_pf.done.next,
-				   typeof(*work), link);
-		list_del(&work->link);
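+		/* canceled or completed alike, the work is off the queue; free it */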
 		if (work->page)
 			put_page(work->page);
 		kmem_cache_free(async_pf_cache, work);
 	}
-	spin_unlock(&vcpu->async_pf.lock);
 
 	vcpu->async_pf.queued = 0;
+	atomic_set(&vcpu->async_pf.done, 0);
 }
 
 bool kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
 {
 	struct kvm_async_pf *work;
+	struct list_head *pos, *temp;
 
 	if (vcpu->async_pf.wakeup) {
 		vcpu->async_pf.wakeup = false;
 		return true;
 	}
 
-	if (list_empty_careful(&vcpu->async_pf.done) ||
+	if (!atomic_read(&vcpu->async_pf.done) ||
 	    !kvm_arch_can_inject_async_page_present(vcpu))
 		return false;
 
-	spin_lock(&vcpu->async_pf.lock);
-	work = list_first_entry(&vcpu->async_pf.done, typeof(*work), link);
-	list_del(&work->link);
-	spin_unlock(&vcpu->async_pf.lock);
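+	/* deliver every completed work still sitting on the queue */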
+	list_for_each_safe(pos, temp, &vcpu->async_pf.queue) {
+		work = list_entry(pos, typeof(*work), queue);
+		if (!work->done)
+			continue;
 
-	if (work->page)
-		kvm_arch_async_page_ready(vcpu, work);
-	kvm_arch_async_page_present(vcpu, work);
+		if (work->page) {
+			kvm_arch_async_page_ready(vcpu, work);
+			put_page(work->page);
+		}
+
+		kvm_arch_async_page_present(vcpu, work);
+
+		list_del(&work->queue);
+		vcpu->async_pf.queued--;
+		kmem_cache_free(async_pf_cache, work);
+		if (atomic_dec_and_test(&vcpu->async_pf.done))
+			break;
+	}
 
-	list_del(&work->queue);
-	vcpu->async_pf.queued--;
-	if (work->page)
-		put_page(work->page);
-	kmem_cache_free(async_pf_cache, work);
 	kvm_arch_async_pf_completion(vcpu);
 
 	return true;
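
For reference, below is a minimal user-space model of the new scheme (an illustrative sketch only: the names item, complete_item and check_completions are invented, the kernel list_head is simplified to a singly linked queue, and the single-threaded demo deliberately sidesteps the memory-ordering questions a concurrent producer/consumer pair would raise). It shows the core idea of the patch: completed works stay on the ordinary queue with a per-work done flag, and a shared atomic counter tells the consumer whether any completed works are waiting.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
	struct item *next;
	bool done;		/* set by the producer when the work finishes */
	int token;
};

static struct item *queue_head;	/* stands in for vcpu->async_pf.queue */
static atomic_int done_count;	/* stands in for vcpu->async_pf.done */

/* producer side, as in async_pf_execute(): flag the item, then publish */
static void complete_item(struct item *it)
{
	it->done = true;			/* models apf->done = true */
	atomic_fetch_add(&done_count, 1);	/* models atomic_inc() */
}

/* consumer side, as in kvm_check_async_pf_completion() */
static bool check_completions(void)
{
	struct item **pp = &queue_head;

	if (atomic_load(&done_count) == 0)	/* models atomic_read() */
		return false;

	while (*pp) {
		struct item *it = *pp;

		if (!it->done) {	/* skip still-pending items */
			pp = &it->next;
			continue;
		}
		printf("item %d completed\n", it->token);
		*pp = it->next;		/* models list_del() */
		free(it);		/* models kmem_cache_free() */
		/* models atomic_dec_and_test(): stop when none are left */
		if (atomic_fetch_sub(&done_count, 1) == 1)
			break;
	}
	return true;
}

int main(void)
{
	/* queue three items, complete only the most recent one */
	for (int i = 0; i < 3; i++) {
		struct item *it = calloc(1, sizeof(*it));

		it->token = i;
		it->next = queue_head;
		queue_head = it;
	}
	complete_item(queue_head);
	check_completions();	/* frees exactly the completed item */
	return 0;
}

As in the patch, the consumer walks the queue in order, delivers only the entries whose done flag is set, and uses the counter purely as a hint that there is work to do and that the scan may stop early, not as a lock.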