@@ -1593,13 +1593,45 @@ static void kvm_isolation_clear_handlers(void)
kvm_page_fault_handler = NULL;
}
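+/*
+ * Make sure the memslots structures currently installed in the VM are
+ * mapped into the KVM address space. Memslots can be reallocated when
+ * they are updated, so a newly allocated structure has to be remapped.
+ */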
+void kvm_isolation_check_memslots(struct kvm *kvm)
+{
+ struct kvm_range_mapping *rmapping;
+ int i, err;
+
+ if (!kvm_isolation())
+ return;
+
+ for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+ rmapping = kvm_get_range_mapping(kvm->memslots[i], NULL);
+ if (rmapping)
+ continue;
+ pr_debug("remapping kvm memslots[%d]\n", i);
+ err = kvm_copy_ptes(kvm->memslots[i],
+ sizeof(struct kvm_memslots));
+ if (err)
+ pr_debug("failed to map kvm memslots[%d]\n", i);
+ }
+}
+
int kvm_isolation_init_vm(struct kvm *kvm)
{
+ int err, i;
+
if (!kvm_isolation())
return 0;
kvm_clear_page_fault();
+ pr_debug("mapping kvm memslots\n");
+
+ for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+ err = kvm_copy_ptes(kvm->memslots[i],
+ sizeof(struct kvm_memslots));
+ if (err)
+ return err;
+ }
+
pr_debug("mapping kvm srcu sda\n");
return (kvm_copy_percpu_mapping(kvm->srcu.sda,
@@ -1608,9 +1640,16 @@ int kvm_isolation_init_vm(struct kvm *kvm)
void kvm_isolation_destroy_vm(struct kvm *kvm)
{
+ int i;
+
if (!kvm_isolation())
return;
+ pr_debug("unmapping kvm memslots\n");
+
+ for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
+ kvm_clear_range_mapping(kvm->memslots[i]);
+
pr_debug("unmapping kvm srcu sda\n");
kvm_clear_percpu_mapping(kvm->srcu.sda);
@@ -32,6 +32,7 @@ static inline bool kvm_isolation(void)
extern void kvm_clear_range_mapping(void *ptr);
extern int kvm_copy_percpu_mapping(void *percpu_ptr, size_t size);
extern void kvm_clear_percpu_mapping(void *percpu_ptr);
+extern void kvm_isolation_check_memslots(struct kvm *kvm);
extern int kvm_add_task_mapping(struct task_struct *tsk);
extern void kvm_cleanup_task_mapping(struct task_struct *tsk);
@@ -9438,6 +9438,7 @@ void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
* mmio generation may have reached its maximum value.
*/
kvm_mmu_invalidate_mmio_sptes(kvm, gen);
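+ /* Memslots may have been reallocated on update; map the new structure. */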
+ kvm_isolation_check_memslots(kvm);
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
@@ -9537,6 +9538,8 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
*/
if (change != KVM_MR_DELETE)
kvm_mmu_slot_apply_flags(kvm, (struct kvm_memory_slot *) new);
+
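+ /* Map the new memslots structure if it is not already mapped. */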
+ kvm_isolation_check_memslots(kvm);
}
void kvm_arch_flush_shadow_all(struct kvm *kvm)
KVM memslots can change after they have been created so new memslots have to be mapped when they are created. TODO: we currently don't unmapped old memslots, they should be unmapped when they are freed. Signed-off-by: Alexandre Chartre <alexandre.chartre@oracle.com> --- arch/x86/kvm/isolation.c | 39 +++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/isolation.h | 1 + arch/x86/kvm/x86.c | 3 +++ 3 files changed, 43 insertions(+), 0 deletions(-)