@@ -63,6 +63,7 @@ struct kvm_task_sleep_node {
struct hlist_node link;
wait_queue_head_t wq;
u32 token;
+ int cpu;
};

static struct kvm_task_sleep_head {
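A note on the new field (my annotation, not the author's): cpu does double duty as a "how to wake" flag and a "where to wake" address. An annotated re-statement of the struct above, with the comment giving my reading of the patch:

struct kvm_task_sleep_node {
	struct hlist_node link;
	wait_queue_head_t wq;
	u32 token;
	int cpu;	/* -1: waiter sleeps on wq and is woken via wake_up();
			 * >= 0: waiter is halted on this CPU and must be
			 * kicked with a reschedule IPI instead */
};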
@@ -91,6 +92,11 @@ static void apf_task_wait(struct task_struct *tsk, u32 token)
struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
struct kvm_task_sleep_node n, *e;
DEFINE_WAIT(wait);
+ int cpu, idle;
+
+ cpu = get_cpu();
+ idle = idle_cpu(cpu);
+	put_cpu();

	spin_lock(&b->lock);
e = _find_apf_task(b, token);
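Why the get_cpu()/put_cpu() pair rather than a bare smp_processor_id() (my annotation): get_cpu() disables preemption, so the CPU number and the idle_cpu() result are taken as one coherent snapshot; without it the task could migrate between the two calls and probe the wrong CPU. The same pattern as a minimal sketch, with a hypothetical helper name:

/* Sketch, not part of the patch: migration-safe "are we on the idle task?" probe. */
static bool sampled_idle(void)
{
	int cpu = get_cpu();		/* disables preemption */
	bool idle = idle_cpu(cpu);	/* idle task running on this CPU? */

	put_cpu();			/* re-enables preemption */
	return idle;
}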
@@ -105,15 +111,30 @@ static void apf_task_wait(struct task_struct *tsk, u32 token)
n.token = token;
init_waitqueue_head(&n.wq);
hlist_add_head(&n.link, &b->list);
+ if (idle || preempt_count() > 1)
+ n.cpu = smp_processor_id();
+ else
+ n.cpu = -1;
	spin_unlock(&b->lock);

	for (;;) {
- prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
+ if (n.cpu < 0)
+ prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
if (hlist_unhashed(&n.link))
break;
- schedule();
+
+ if (n.cpu < 0) {
+ schedule();
+ } else {
+ /*
+ * We cannot reschedule. So halt.
+ */
+ native_safe_halt();
+ local_irq_disable();
+ }
}
- finish_wait(&n.wq, &wait);
+ if (n.cpu < 0)
+		finish_wait(&n.wq, &wait);

	return;
}
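Two subtleties in this hunk, as I read them. First, the preempt_count() > 1 test: b->lock is held at that point and the spin_lock() itself contributes one to preempt_count(), so the test really asks whether preemption was already disabled before the lock was taken; in that case, as when idle_cpu() was true, schedule() is not allowed and the task must wait in place. That is also why the halted path skips prepare_to_wait()/finish_wait(): it never sleeps on the wait queue. Second, the halt path: native_safe_halt() on x86 is "sti" and "hlt" back to back, so the interrupt window opens only once the CPU is entering hlt; a wake-up IPI sent after the hlist_unhashed() check therefore cannot be lost before the halt, and the local_irq_disable() afterwards restores the interrupts-off state for the next trip around the loop. A sketch of what the helper amounts to (my annotation, not part of the patch):

static inline void sketch_safe_halt(void)
{
	/* interrupts become deliverable only at the "hlt" itself */
	asm volatile("sti; hlt" : : : "memory");
}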
@@ -146,7 +167,9 @@ again:
hlist_add_head(&n->link, &b->list);
} else {
hlist_del_init(&n->link);
- if (waitqueue_active(&n->wq))
+ if (n->cpu >= 0)
+ smp_send_reschedule(n->cpu);
+ else if (waitqueue_active(&n->wq))
wake_up(&n->wq);
}
spin_unlock(&b->lock);
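On the wake side (my annotation): a halted waiter never registered on the wait queue, so wake_up() would find nothing to wake; smp_send_reschedule() instead fires a reschedule IPI at the recorded CPU, which brings it out of hlt. Back in the wait loop, the waiter re-checks hlist_unhashed(&n.link), which the waker has just made true with hlist_del_init() under b->lock, and breaks out. A condensed sketch of that handshake, with a hypothetical function name and the dummy-entry path of the hunk above omitted:

static void sketch_wake(struct kvm_task_sleep_head *b,
			struct kvm_task_sleep_node *n)
{
	spin_lock(&b->lock);
	hlist_del_init(&n->link);	/* makes hlist_unhashed() true */
	if (n->cpu >= 0)
		smp_send_reschedule(n->cpu);	/* kick the halted vCPU */
	else if (waitqueue_active(&n->wq))
		wake_up(&n->wq);		/* ordinary sleeper */
	spin_unlock(&b->lock);
}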