Map VM data, in particular the kvm structure data.

Signed-off-by: Alexandre Chartre <alexandre.chartre@oracle.com>
---
 arch/x86/kvm/isolation.c | 17 +++++++++++++++++
 arch/x86/kvm/isolation.h |  2 ++
 arch/x86/kvm/vmx/vmx.c   | 31 ++++++++++++++++++++++++++++++-
 arch/x86/kvm/x86.c       | 12 ++++++++++++
 include/linux/kvm_host.h |  1 +
 virt/kvm/arm/arm.c       |  4 ++++
 virt/kvm/kvm_main.c      |  2 +-
 7 files changed, 67 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/isolation.c b/arch/x86/kvm/isolation.c
--- a/arch/x86/kvm/isolation.c
+++ b/arch/x86/kvm/isolation.c
@@ -1222,6 +1222,23 @@ static void kvm_isolation_clear_handlers(void)
kvm_set_isolation_exit_handler(NULL);
}
+int kvm_isolation_init_vm(struct kvm *kvm)
+{
+ if (!kvm_isolation())
+ return 0;
+
+ return kvm_copy_percpu_mapping(kvm->srcu.sda,
+ sizeof(struct srcu_data));
+}
+
+void kvm_isolation_destroy_vm(struct kvm *kvm)
+{
+ if (!kvm_isolation())
+ return;
+
+ kvm_clear_percpu_mapping(kvm->srcu.sda);
+}
+
int kvm_isolation_init(void)
{
int r;
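
(For readers following the series: kvm_copy_percpu_mapping() and kvm_clear_percpu_mapping() are helpers introduced by an earlier patch in this series. The sketch below only illustrates what a per-cpu copy helper of this kind could look like, built on the kvm_copy_ptes() call used elsewhere in this patch; it is not the actual implementation, and example_copy_percpu_mapping is a made-up name.)

static int example_copy_percpu_mapping(void *percpu_ptr, size_t size)
{
	int cpu, err;

	/* Copy the PTEs covering each CPU's instance of the buffer. */
	for_each_possible_cpu(cpu) {
		err = kvm_copy_ptes(per_cpu_ptr(percpu_ptr, cpu), size);
		if (err)
			return err;	/* caller unwinds with the clear helper */
	}

	return 0;
}
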
diff --git a/arch/x86/kvm/isolation.h b/arch/x86/kvm/isolation.h
--- a/arch/x86/kvm/isolation.h
+++ b/arch/x86/kvm/isolation.h
@@ -23,6 +23,8 @@ static inline bool kvm_isolation(void)
extern int kvm_isolation_init(void);
extern void kvm_isolation_uninit(void);
+extern int kvm_isolation_init_vm(struct kvm *kvm);
+extern void kvm_isolation_destroy_vm(struct kvm *kvm);
extern void kvm_isolation_enter(void);
extern void kvm_isolation_exit(void);
extern void kvm_may_access_sensitive_data(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6523,6 +6523,33 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
vmx_complete_interrupts(vmx);
}
+static void vmx_unmap_vm(struct kvm *kvm)
+{
+ struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
+
+ if (!kvm_isolation())
+ return;
+
+ pr_debug("unmapping kvm %p", kvm_vmx);
+ kvm_clear_range_mapping(kvm_vmx);
+}
+
+static int vmx_map_vm(struct kvm *kvm)
+{
+ struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
+
+ if (!kvm_isolation())
+ return 0;
+
+ pr_debug("mapping kvm %p", kvm_vmx);
+ /*
+ * Only copy kvm_vmx struct mapping because other
+ * attributes (like kvm->srcu) are not initialized
+ * yet.
+ */
+ return kvm_copy_ptes(kvm_vmx, sizeof(struct kvm_vmx));
+}
+
static struct kvm *vmx_vm_alloc(void)
{
struct kvm_vmx *kvm_vmx = __vmalloc(sizeof(struct kvm_vmx),
@@ -6533,6 +6560,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
static void vmx_vm_free(struct kvm *kvm)
{
+ vmx_unmap_vm(kvm);
vfree(to_kvm_vmx(kvm));
}
@@ -6702,7 +6730,8 @@ static int vmx_vm_init(struct kvm *kvm)
break;
}
}
- return 0;
+
+ return vmx_map_vm(kvm);
}
static void __init vmx_check_processor_compat(void *rtn)
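
(The vmx_map_vm()/vmx_unmap_vm() pair above is VMX-specific because it is the full struct kvm_vmx, not just the embedded struct kvm, that has to be mapped. An SVM implementation would presumably follow the same pattern; the sketch below is purely illustrative — svm_map_vm() is a made-up name, while struct kvm_svm and to_kvm_svm() are the existing SVM counterparts of struct kvm_vmx and to_kvm_vmx().)

static int svm_map_vm(struct kvm *kvm)
{
	struct kvm_svm *kvm_svm = to_kvm_svm(kvm);

	if (!kvm_isolation())
		return 0;

	/*
	 * As on the VMX side, only the kvm_svm structure is mapped here;
	 * kvm->srcu is not initialized yet at vm_init time.
	 */
	return kvm_copy_ptes(kvm_svm, sizeof(struct kvm_svm));
}
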
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9207,6 +9207,17 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
return 0;
}
+void kvm_arch_vm_postcreate(struct kvm *kvm)
+{
+ /*
+ * The kvm structure is mapped in vmx.c so that the full kvm_vmx
+ * structure can be mapped. Attributes allocated in the kvm
+ * structure (like kvm->srcu) are mapped by kvm_isolation_init_vm()
+ * because they are not initialized when vmx.c maps the kvm structure.
+ */
+ kvm_isolation_init_vm(kvm);
+}
+
static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
vcpu_load(vcpu);
@@ -9320,6 +9331,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 0, 0);
x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
}
+ kvm_isolation_destroy_vm(kvm);
if (kvm_x86_ops->vm_destroy)
kvm_x86_ops->vm_destroy(kvm);
kvm_pic_destroy(kvm);
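
(Taken together, the VM data is mapped in two stages. The function below does not exist anywhere in the patch; it simply collapses the two stages into one place to make the ordering and the calls involved explicit — stage 1 actually runs from vmx_vm_init(), before kvm->srcu is set up, and stage 2 from kvm_arch_vm_postcreate(). example_map_vm_data is a made-up name.)

static int example_map_vm_data(struct kvm *kvm)
{
	int err;

	/* Stage 1: map the whole kvm_vmx structure (vmx_vm_init()). */
	err = kvm_copy_ptes(to_kvm_vmx(kvm), sizeof(struct kvm_vmx));
	if (err)
		return err;

	/*
	 * Stage 2: map the per-cpu SRCU data (kvm_isolation_init_vm(),
	 * called from kvm_arch_vm_postcreate() once kvm->srcu exists).
	 */
	return kvm_copy_percpu_mapping(kvm->srcu.sda, sizeof(struct srcu_data));
}

On teardown the order is reversed: kvm_arch_destroy_vm() drops the per-cpu SRCU mapping via kvm_isolation_destroy_vm(), and the kvm_vmx mapping is dropped later when vmx_vm_free() runs vmx_unmap_vm().
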
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -932,6 +932,7 @@ static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
+void kvm_arch_vm_postcreate(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -156,6 +156,10 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
return ret;
}
+void kvm_arch_vm_postcreate(struct kvm *kvm)
+{
+}
+
bool kvm_arch_has_vcpu_debugfs(void)
{
return false;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3366,7 +3366,7 @@ static int kvm_dev_ioctl_create_vm(unsigned long type)
return -ENOMEM;
}
kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm);
-
+ kvm_arch_vm_postcreate(kvm);
fd_install(r, file);
return r;