@@ -61,6 +61,20 @@ struct pgt_directory_group {
 
 #define PGTD_ALIGN(entry) \
 	((typeof(entry))(((unsigned long)(entry)) & PAGE_MASK))
 
+/*
+ * Variables to keep track of address ranges mapped into the KVM
+ * address space.
+ */
+struct kvm_range_mapping {
+	struct list_head list;
+	void *ptr;
+	size_t size;
+	enum page_table_level level;
+};
+
+static LIST_HEAD(kvm_range_mapping_list);
+static DEFINE_MUTEX(kvm_range_mapping_lock);
+
 struct mm_struct kvm_mm = {
 	.mm_rb = RB_ROOT,
@@ -91,6 +105,58 @@ struct mm_struct kvm_mm = {
 static bool __read_mostly address_space_isolation;
 module_param(address_space_isolation, bool, 0444);
 
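+/*
+ * Return the recorded range mapping containing @ptr, or NULL if @ptr
+ * is not mapped. *@subset is set to true when @ptr points inside a
+ * range rather than at its start. Caller must hold kvm_range_mapping_lock.
+ */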
+static struct kvm_range_mapping *kvm_get_range_mapping_locked(void *ptr,
+							      bool *subset)
+{
+	struct kvm_range_mapping *range;
+
+	list_for_each_entry(range, &kvm_range_mapping_list, list) {
+		if (range->ptr == ptr) {
+			if (subset)
+				*subset = false;
+			return range;
+		}
+		if (ptr > range->ptr && ptr < range->ptr + range->size) {
+			if (subset)
+				*subset = true;
+			return range;
+		}
+	}
+
+	return NULL;
+}
+
+static struct kvm_range_mapping *kvm_get_range_mapping(void *ptr, bool *subset)
+{
+	struct kvm_range_mapping *range;
+
+	mutex_lock(&kvm_range_mapping_lock);
+	range = kvm_get_range_mapping_locked(ptr, subset);
+	mutex_unlock(&kvm_range_mapping_lock);
+
+	return range;
+}
+
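+/* Drop all recorded ranges; called when the KVM mm is torn down. */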
+static void kvm_free_all_range_mapping(void)
+{
+	struct kvm_range_mapping *range, *range_next;
+
+	mutex_lock(&kvm_range_mapping_lock);
+
+	list_for_each_entry_safe(range, range_next,
+				 &kvm_range_mapping_list, list) {
+		list_del(&range->list);
+		kfree(range);
+	}
+
+	mutex_unlock(&kvm_range_mapping_lock);
+}
 
 static struct pgt_directory_group *pgt_directory_group_create(void)
 {
@@ -661,10 +727,32 @@ static int kvm_copy_mapping(void *ptr, size_t size, enum page_table_level level)
 {
 	unsigned long addr = (unsigned long)ptr;
 	unsigned long end = addr + ((unsigned long)size);
+	struct kvm_range_mapping *range_mapping;
+	int err;
 
 	BUG_ON(current->mm == &kvm_mm);
 
-	pr_debug("KERNMAP COPY addr=%px size=%lx\n", ptr, size);
-	return kvm_copy_pgd_range(&kvm_mm, current->mm, addr, end, level);
+	pr_debug("KERNMAP COPY addr=%px size=%lx level=%d\n", ptr, size, level);
+
+	range_mapping = kmalloc(sizeof(*range_mapping), GFP_KERNEL);
+	if (!range_mapping)
+		return -ENOMEM;
+
+	err = kvm_copy_pgd_range(&kvm_mm, current->mm, addr, end, level);
+	if (err) {
+		kfree(range_mapping);
+		return err;
+	}
+
+	/* Record the range so that it can be cleared at the same level. */
+	INIT_LIST_HEAD(&range_mapping->list);
+	range_mapping->ptr = ptr;
+	range_mapping->size = size;
+	range_mapping->level = level;
+	mutex_lock(&kvm_range_mapping_lock);
+	list_add(&range_mapping->list, &kvm_range_mapping_list);
+	mutex_unlock(&kvm_range_mapping_lock);
+
+	return 0;
 }
 
@@ -720,6 +808,8 @@ static void kvm_isolation_uninit_mm(void)
 	destroy_context(&kvm_mm);
 
+	kvm_free_all_range_mapping();
+
 #ifdef CONFIG_PAGE_TABLE_ISOLATION
 	/*
 	 * With PTI, the KVM address space is defined in the user
This will be used when we have to clear mappings, to ensure that the
same range is cleared at the same page table level it was copied at.

Signed-off-by: Alexandre Chartre <alexandre.chartre@oracle.com>
---
 arch/x86/kvm/isolation.c | 94 ++++++++++++++++++++++++++++++++++++++++++++--
 1 files changed, 92 insertions(+), 2 deletions(-)
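To illustrate the intended use of the recorded level, here is a rough
sketch of how a follow-up patch could clear a mapping. Only the range
bookkeeping comes from this patch; kvm_clear_mapping() and the
kvm_clear_pgd_range() helper (assumed to mirror kvm_copy_pgd_range())
are hypothetical:

static void kvm_clear_mapping(void *ptr)
{
	struct kvm_range_mapping *range;
	unsigned long addr;
	bool subset;

	mutex_lock(&kvm_range_mapping_lock);

	range = kvm_get_range_mapping_locked(ptr, &subset);
	if (range) {
		/* Clear at the level the range was copied with. */
		addr = (unsigned long)range->ptr;
		kvm_clear_pgd_range(&kvm_mm, addr, addr + range->size,
				    range->level);
		/* Only drop the bookkeeping when the whole range goes away. */
		if (!subset) {
			list_del(&range->list);
			kfree(range);
		}
	}

	mutex_unlock(&kvm_range_mapping_lock);
}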