@@ -4536,6 +4536,31 @@ int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
}
EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy);
+/*
+ * Force any vcpu that tries to access the specified page to hang.
+ *
+ * kvm_vcpus_hang_on_page_start() and kvm_vcpus_hang_on_page_end() must
+ * be used in pairs; they are currently used to synchronize memory access
+ * between vcpus while the host cross-modifies guest code.
+ *
+ * We unmap the page from the guest and synchronize memory with
+ * kvm_flush_remote_tlbs() under the protection of mmu-lock.  A vcpu
+ * that then accesses the page triggers a #PF and blocks on mmu-lock.
+ */
+void kvm_vcpus_hang_on_page_start(struct kvm *kvm, gfn_t gfn)
+{
+ spin_lock(&kvm->mmu_lock);
+
+ /* kvm_flush_remote_tlbs() can act as a serializing instruction. */
+ if (kvm_unmap_hva(kvm, gfn_to_hva(kvm, gfn)))
+ kvm_flush_remote_tlbs(kvm);
+}
+
+void kvm_vcpus_hang_on_page_end(struct kvm *kvm)
+{
+ spin_unlock(&kvm->mmu_lock);
+}
+
void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
ASSERT(vcpu);
@@ -115,4 +115,7 @@ static inline bool permission_fault(struct kvm_mmu *mmu, unsigned pte_access,
}
void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
+
+void kvm_vcpus_hang_on_page_start(struct kvm *kvm, gfn_t gfn);
+void kvm_vcpus_hang_on_page_end(struct kvm *kvm);
#endif
@@ -5528,8 +5528,15 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
char instruction[3];
unsigned long rip = kvm_rip_read(vcpu);
+ gpa_t gpa;
+
+ gpa = kvm_mmu_gva_to_gpa_fetch(vcpu, rip, NULL);
+ if (gpa == UNMAPPED_GVA)
+ return X86EMUL_PROPAGATE_FAULT;
+ kvm_vcpus_hang_on_page_start(vcpu->kvm, gpa_to_gfn(gpa));
kvm_x86_ops->patch_hypercall(vcpu, instruction);
+ kvm_vcpus_hang_on_page_end(vcpu->kvm);
return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
}
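
For reference, a minimal sketch of how a caller is expected to bracket a cross-modifying write with the new pair, modelled on the emulator_fix_hypercall() hunk above. patch_guest_insn() and its insn/len parameters are hypothetical; since mmu_lock is a spinlock, nothing between the start/end calls may sleep:

static int patch_guest_insn(struct x86_emulate_ctxt *ctxt, gva_t gva,
			    const void *insn, unsigned int len)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	gpa_t gpa;

	gpa = kvm_mmu_gva_to_gpa_fetch(vcpu, gva, NULL);
	if (gpa == UNMAPPED_GVA)
		return X86EMUL_PROPAGATE_FAULT;

	/*
	 * Unmap the page and flush remote TLBs under mmu_lock; from now
	 * on any vcpu touching the page faults and blocks on mmu_lock.
	 * mmu_lock is a spinlock, so this window must not sleep.
	 */
	kvm_vcpus_hang_on_page_start(vcpu->kvm, gpa_to_gfn(gpa));

	/* Non-sleeping work only, e.g. building the replacement bytes. */

	/* Release mmu_lock: blocked vcpus re-fault, re-map and resume. */
	kvm_vcpus_hang_on_page_end(vcpu->kvm);

	return emulator_write_emulated(ctxt, gva, insn, len, NULL);
}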