===================================================================
@@ -1,4 +1,4 @@
-BEGIN { split("INIT_WORK on_each_cpu smp_call_function " \
+BEGIN { split("INIT_WORK on_each_cpu smp_call_function smp_send_reschedule " \
"hrtimer_add_expires_ns hrtimer_get_expires " \
"hrtimer_get_expires_ns hrtimer_start_expires " \
"hrtimer_expires_remaining " \
===================================================================
@@ -1,7 +1,7 @@
BEGIN { split("INIT_WORK desc_struct ldttss_desc64 desc_ptr " \
"hrtimer_add_expires_ns hrtimer_get_expires " \
"hrtimer_get_expires_ns hrtimer_start_expires " \
- "hrtimer_expires_remaining " \
+ "hrtimer_expires_remaining smp_send_reschedule " \
"on_each_cpu relay_open request_irq free_irq" , compat_apis); }
/^int kvm_init\(/ { anon_inodes = 1 }
===================================================================
@@ -224,6 +224,10 @@ int kvm_smp_call_function_mask(cpumask_t
#define smp_call_function_mask kvm_smp_call_function_mask
+void kvm_smp_send_reschedule(int cpu);
+
+#define smp_send_reschedule kvm_smp_send_reschedule
+
#endif
/* empty_zero_page isn't exported in all kernels */
===================================================================
@@ -221,6 +221,58 @@ out:
return 0;
}
+#include <linux/workqueue.h>
+
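+/*
+ * Empty IPI handler: the interrupt itself is enough to kick the target
+ * CPU out of guest mode.
+ */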
+static void vcpu_kick_intr(void *info)
+{
+}
+
+struct kvm_kick {
+ int cpu;
+ struct work_struct work;
+};
+
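+/*
+ * The workqueue callback prototype changed in 2.6.20: older kernels pass
+ * the data pointer given to INIT_WORK(), newer ones pass the work_struct,
+ * from which the enclosing kvm_kick is recovered with container_of().
+ */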
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
+static void kvm_do_smp_call_function(void *data)
+{
+ int me;
+ struct kvm_kick *kvm_kick = data;
+#else
+static void kvm_do_smp_call_function(struct work_struct *work)
+{
+ int me;
+ struct kvm_kick *kvm_kick = container_of(work, struct kvm_kick, work);
+#endif
+ me = get_cpu();
+
+ if (kvm_kick->cpu != me)
+ smp_call_function_single(kvm_kick->cpu, vcpu_kick_intr,
+ NULL, 0);
+ kfree(kvm_kick);
+ put_cpu();
+}
+
+void kvm_queue_smp_call_function(int cpu)
+{
+ struct kvm_kick *kvm_kick = kmalloc(sizeof(struct kvm_kick), GFP_ATOMIC);
+
+ if (!kvm_kick)
+ return;
+
+ kvm_kick->cpu = cpu;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
+ INIT_WORK(&kvm_kick->work, kvm_do_smp_call_function, kvm_kick);
+#else
+ INIT_WORK(&kvm_kick->work, kvm_do_smp_call_function);
+#endif
+
+ schedule_work(&kvm_kick->work);
+}
+
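+/*
+ * smp_call_function_single() must not be called with interrupts disabled,
+ * so in that case defer the IPI to keventd via the workqueue path above.
+ */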
+void kvm_smp_send_reschedule(int cpu)
+{
+ if (irqs_disabled()) {
+ kvm_queue_smp_call_function(cpu);
+ return;
+ }
+ smp_call_function_single(cpu, vcpu_kick_intr, NULL, 0);
+}
#endif
/* manually export hrtimer_init/start/cancel */
smp_send_reschedule was exported (via smp_ops) in v2.6.24. For kernels < 2.6.24, add a compat function which sends the IPI directly when possible and, if interrupts are disabled, defers it to keventd context.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
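For illustration only, a minimal sketch (not part of the patch) of how an unmodified call site picks up the compat path on kernels < 2.6.24. The caller name is hypothetical, and it assumes the compat header above is included so that smp_send_reschedule is #defined to kvm_smp_send_reschedule:

#include <linux/smp.h>

/* Hypothetical caller; with the compat header included, the call below
 * expands to kvm_smp_send_reschedule(cpu): the IPI is sent immediately,
 * or, if interrupts are disabled, queued to keventd and sent later from
 * process context. */
static void example_kick_cpu(int cpu)
{
	smp_send_reschedule(cpu);
}

The macro indirection keeps call sites identical across kernel versions; only the compat layer changes for kernels < 2.6.24.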