@@ -32,3 +32,66 @@
#define KVM_MAGIC_PAGE (-4096L)
#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)
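+/* Whether the paravirt instruction patching succeeded; reported from kvm_guest_init() */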
+static bool kvm_patching_worked = true;
+
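+/*
+ * Rewrite a single instruction word in kernel text and flush the
+ * icache for that word so the new encoding is fetched from then on.
+ */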
+static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
+{
+ *inst = new_inst;
+ flush_icache_range((ulong)inst, (ulong)inst + 4);
+}
+
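+/*
+ * Hypercall asking the host to map the magic (shared) page at -4096
+ * for the CPU this runs on.  The unused data argument only satisfies
+ * the on_each_cpu() callback signature.
+ */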
+static void kvm_map_magic_page(void *data)
+{
+ kvm_hypercall2(KVM_HC_PPC_MAP_MAGIC_PAGE,
+ KVM_MAGIC_PAGE, /* Physical Address */
+ KVM_MAGIC_PAGE); /* Effective Address */
+}
+
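+/*
+ * Inspect one kernel instruction and patch in a paravirtualized
+ * replacement when one is known.  The switch bodies start out empty;
+ * the individual instruction rewrites are added by follow-up patches.
+ */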
+static void kvm_check_ins(u32 *inst)
+{
+ u32 _inst = *inst;
+ u32 inst_no_rt = _inst & ~KVM_MASK_RT;
+ u32 inst_rt = _inst & KVM_MASK_RT;
+
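+ /* Rewrites keyed on the instruction with its rt field masked off */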
+ switch (inst_no_rt) {
+ }
+
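+ /* Rewrites keyed on the exact instruction encoding */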
+ switch (_inst) {
+ }
+}
+
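+/*
+ * Map the magic page on every CPU, then scan the kernel text from
+ * _stext to _etext so kvm_check_ins() can patch what it recognizes.
+ */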
+static void kvm_use_magic_page(void)
+{
+ u32 *p;
+ u32 *start, *end;
+
+ /* Tell the host to map the magic page to -4096 on all CPUs */
+ on_each_cpu(kvm_map_magic_page, NULL, 1);
+
+ /* Now loop through the kernel text and patch the instructions we recognize */
+ start = (void*)_stext;
+ end = (void*)_etext;
+
+ for (p = start; p < end; p++)
+ kvm_check_ins(p);
+}
+
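+/*
+ * Guest init: bail out when not running under KVM, otherwise enable
+ * the magic page if the host offers it and report the patching result.
+ */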
+static int __init kvm_guest_init(void)
+{
+ if (!kvm_para_available())
+ return 0;
+
+ if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
+ kvm_use_magic_page();
+
+ printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
+ kvm_patching_worked ? "worked" : "failed");
+
+ return 0;
+}
+
+postcore_initcall(kvm_guest_init);