--- a/arch/x86/kvm/isolation.c
+++ b/arch/x86/kvm/isolation.c
@@ -1614,6 +1614,29 @@ void kvm_isolation_check_memslots(struct kvm *kvm)
}
+void kvm_isolation_check_buses(struct kvm *kvm)
+{
+ struct kvm_range_mapping *rmapping;
+ struct kvm_io_bus *bus;
+ int i, err;
+
+ if (!kvm_isolation())
+ return;
+
+ for (i = 0; i < KVM_NR_BUSES; i++) {
+ bus = kvm->buses[i];
+ rmapping = kvm_get_range_mapping(bus, NULL);
+ if (rmapping)
+ continue;
+ pr_debug("remapping kvm buses[%d]\n", i);
+ err = kvm_copy_ptes(bus, sizeof(*bus) + bus->dev_count *
+ sizeof(struct kvm_io_range));
+ if (err)
+ pr_debug("failed to map kvm buses[%d]\n", i);
+ }
+
+}
+
int kvm_isolation_init_vm(struct kvm *kvm)
{
int err, i;
@@ -1632,6 +1655,15 @@ int kvm_isolation_init_vm(struct kvm *kvm)
return err;
}
+ pr_debug("mapping kvm buses\n");
+
+ for (i = 0; i < KVM_NR_BUSES; i++) {
+ err = kvm_copy_ptes(kvm->buses[i],
+ sizeof(struct kvm_io_bus));
+ if (err)
+ return err;
+ }
+
pr_debug("mapping kvm srcu sda\n");
return (kvm_copy_percpu_mapping(kvm->srcu.sda,
@@ -1650,6 +1682,11 @@ void kvm_isolation_destroy_vm(struct kvm *kvm)
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
kvm_clear_range_mapping(kvm->memslots[i]);
+ pr_debug("unmapping kvm buses\n");
+
+ for (i = 0; i < KVM_NR_BUSES; i++)
+ kvm_clear_range_mapping(kvm->buses[i]);
+
pr_debug("unmapping kvm srcu sda\n");
kvm_clear_percpu_mapping(kvm->srcu.sda);
--- a/arch/x86/kvm/isolation.h
+++ b/arch/x86/kvm/isolation.h
@@ -33,6 +33,7 @@ static inline bool kvm_isolation(void)
extern int kvm_copy_percpu_mapping(void *percpu_ptr, size_t size);
extern void kvm_clear_percpu_mapping(void *percpu_ptr);
extern void kvm_isolation_check_memslots(struct kvm *kvm);
+extern void kvm_isolation_check_buses(struct kvm *kvm);
extern int kvm_add_task_mapping(struct task_struct *tsk);
extern void kvm_cleanup_task_mapping(struct task_struct *tsk);
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9253,6 +9253,13 @@ void kvm_arch_sync_events(struct kvm *kvm)
cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work);
cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work);
kvm_free_pit(kvm);
+ /*
+ * Note that kvm_isolation_destroy_vm() has to be called from here
+ * rather than from kvm_arch_destroy_vm(): it unmaps the KVM buses,
+ * and those buses have already been destroyed by the time
+ * kvm_arch_destroy_vm() is invoked.
+ */
+ kvm_isolation_destroy_vm(kvm);
}
int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
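For context, the ordering constraint described in the comment above comes from the VM teardown path in virt/kvm/kvm_main.c. A condensed, illustrative sketch of kvm_destroy_vm() (not part of this patch, details simplified):

	static void kvm_destroy_vm(struct kvm *kvm)
	{
		int i;

		kvm_arch_sync_events(kvm);	/* bus mappings are cleared here */

		for (i = 0; i < KVM_NR_BUSES; i++) {
			struct kvm_io_bus *bus = kvm_get_bus(kvm, i);

			if (bus)
				kvm_io_bus_destroy(bus);
			kvm->buses[i] = NULL;	/* the buses are gone ... */
		}

		kvm_arch_destroy_vm(kvm);	/* ... before this runs */
	}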
@@ -9331,7 +9338,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 0, 0);
x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
}
- kvm_isolation_destroy_vm(kvm);
if (kvm_x86_ops->vm_destroy)
kvm_x86_ops->vm_destroy(kvm);
kvm_pic_destroy(kvm);
@@ -9909,6 +9915,11 @@ bool kvm_vector_hashing_enabled(void)
}
EXPORT_SYMBOL_GPL(kvm_vector_hashing_enabled);
+void kvm_arch_buses_updated(struct kvm *kvm, struct kvm_io_bus *bus)
+{
+ kvm_isolation_check_buses(kvm);
+}
+
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -199,6 +199,7 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
struct kvm_io_device *dev);
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
gpa_t addr);
+void kvm_arch_buses_updated(struct kvm *kvm, struct kvm_io_bus *bus);
#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3749,6 +3749,8 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
synchronize_srcu_expedited(&kvm->srcu);
kfree(bus);
+ kvm_arch_buses_updated(kvm, new_bus);
+
return 0;
}
KVM buses can change after the VM has been created: registering a device
reallocates the corresponding bus. The new bus then has to be mapped into
the KVM address space as well.

Signed-off-by: Alexandre Chartre <alexandre.chartre@oracle.com>
---
 arch/x86/kvm/isolation.c | 37 +++++++++++++++++++++++++++++++++++++
 arch/x86/kvm/isolation.h |  1 +
 arch/x86/kvm/x86.c       | 13 ++++++++++++-
 include/linux/kvm_host.h |  1 +
 virt/kvm/kvm_main.c      |  2 ++
 5 files changed, 53 insertions(+), 1 deletions(-)
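For reference, the two different sizes passed to kvm_copy_ptes() follow from the layout of struct kvm_io_bus, shown below in simplified form (field details are an approximation of the kernel this series is based on): a freshly created bus is empty, so mapping the bare structure at VM init is enough, while a bus with registered devices also carries a flexible array of ranges that the mapping has to cover.

	/* Simplified view of include/linux/kvm_host.h, for illustration only. */
	struct kvm_io_range {
		gpa_t addr;
		int len;
		struct kvm_io_device *dev;
	};

	struct kvm_io_bus {
		int dev_count;
		int ioeventfd_count;
		struct kvm_io_range range[];	/* grows as devices are registered */
	};

	/*
	 * At kvm_isolation_init_vm() time the buses are empty, so mapping
	 * sizeof(struct kvm_io_bus) is sufficient.  kvm_io_bus_register_dev()
	 * then reallocates the bus at a new address with one more populated
	 * range, which is why kvm_isolation_check_buses() maps
	 * sizeof(*bus) + bus->dev_count * sizeof(struct kvm_io_range) instead.
	 */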