@@ -119,3 +119,23 @@ void kvm_isolation_uninit(void)
kvm_isolation_uninit_mm();
pr_info("KVM: x86: End of isolated address space\n");
}
+
+void kvm_isolation_enter(void)
+{
+ if (address_space_isolation) {
+ /*
+ * Switches to kvm_mm should only happen from a vCPU thread,
+ * which is never a kernel thread and so always has an mm.
+ */
+ BUG_ON(current->active_mm == NULL);
+ /* TODO: switch to kvm_mm */
+ }
+}
+
+void kvm_isolation_exit(void)
+{
+ if (address_space_isolation) {
+ /* TODO: Kick sibling hyperthreads before switching to the host mm */
+ /* TODO: switch back to original mm */
+ }
+}
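
For orientation, here is one possible shape the two TODOs above could take once the switch is wired up. This is only a sketch, not part of the patch: it assumes kvm_mm is the isolated struct mm_struct built earlier in the series, that both calls run with interrupts disabled (as they do at the vcpu_enter_guest() call sites below), and it introduces a hypothetical per-cpu variable (kvm_prev_mm) and helper (kvm_isolation_kick_siblings(), sketched after the last hunk). It also deliberately ignores mm refcounting and ->active_mm bookkeeping; the series may implement the switch quite differently.

#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/sched.h>

/* As introduced earlier in this series (types assumed for the sketch). */
extern bool address_space_isolation;
extern struct mm_struct kvm_mm;

/* Hypothetical: remembers the mm to return to in kvm_isolation_exit(). */
static DEFINE_PER_CPU(struct mm_struct *, kvm_prev_mm);

void kvm_isolation_enter(void)
{
	if (address_space_isolation) {
		/*
		 * Switches to kvm_mm should only happen from a vCPU thread,
		 * which is never a kernel thread and so always has an mm.
		 */
		BUG_ON(current->active_mm == NULL);
		/* Interrupts are off here, so the _irqs_off variant is usable. */
		this_cpu_write(kvm_prev_mm, current->active_mm);
		switch_mm_irqs_off(current->active_mm, &kvm_mm, current);
	}
}

void kvm_isolation_exit(void)
{
	if (address_space_isolation) {
		/* Force sibling hyperthreads out of guest mode first. */
		kvm_isolation_kick_siblings();
		switch_mm_irqs_off(&kvm_mm, this_cpu_read(kvm_prev_mm), current);
	}
}
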
@@ -4,5 +4,7 @@
extern int kvm_isolation_init(void);
extern void kvm_isolation_uninit(void);
+extern void kvm_isolation_enter(void);
+extern void kvm_isolation_exit(void);
#endif
@@ -7896,6 +7896,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
goto cancel_injection;
}
+ kvm_isolation_enter();
+
if (req_immediate_exit) {
kvm_make_request(KVM_REQ_EVENT, vcpu);
kvm_x86_ops->request_immediate_exit(vcpu);
@@ -7946,6 +7948,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
+ /*
+ * TODO: Move this to where we architecturally need to access
+ * sensitive host (or other VM) data.
+ */
+ kvm_isolation_exit();
+
vcpu->mode = OUTSIDE_GUEST_MODE;
smp_wmb();
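
The "kick sibling hyperthreads" TODO refers to forcing any hyperthread sharing the physical core out of guest mode before the host mm becomes reachable again. Below is a minimal sketch of such a helper, assuming a reschedule IPI (the same mechanism kvm_vcpu_kick() relies on) is enough to force the exit; the helper name and the exact IPI choice are assumptions, not something this patch defines.

#include <linux/smp.h>
#include <linux/topology.h>

/*
 * Hypothetical helper: IPI every sibling hyperthread of this CPU.
 * A reschedule IPI forces a VM exit on a sibling still running guest
 * code, so the host mm that kvm_isolation_exit() is about to restore
 * is never mapped while a sibling is still executing guest code.
 */
static void kvm_isolation_kick_siblings(void)
{
	int cpu, this_cpu = smp_processor_id();

	for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
		if (cpu != this_cpu)
			smp_send_reschedule(cpu);
	}
}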