@@ -16,7 +16,7 @@ kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o
kvm-y += x86.o emulate.o i8259.o irq.o lapic.o \
i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \
- hyperv.o debugfs.o mmu/mmu.o mmu/page_track.o \
+ hyperv.o debugfs.o memfd.o mmu/mmu.o mmu/page_track.o \
mmu/spte.o
kvm-$(CONFIG_X86_64) += mmu/tdp_iter.o mmu/tdp_mmu.o
kvm-$(CONFIG_KVM_XEN) += xen.o
new file mode 100644
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * memfd.c: routines for fd-based memory backing store
+ * Copyright (c) 2021, Intel Corporation.
+ *
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/memfd.h>
+
+static const struct guest_mem_ops *memfd_ops;
+
+static void memfd_invalidate_page_range(struct inode *inode, void *owner,
+					pgoff_t start, pgoff_t end)
+{
+	/* TODO: this can be reached after the owner no longer exists. */
+}
+
+static const struct guest_ops guest_ops = {
+	.invalidate_page_range = memfd_invalidate_page_range,
+};
+
+static unsigned long memfd_get_lock_pfn(const struct kvm_memory_slot *slot,
+ gfn_t gfn, int *page_level)
+{
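+	/*
+	 * For private slots, userspace_addr appears to carry the byte offset
+	 * into the backing file rather than a VMA address; shifting it gives
+	 * the base page index within the file.
+	 */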
+ pgoff_t index = gfn - slot->base_gfn +
+ (slot->userspace_addr >> PAGE_SHIFT);
+
+ return memfd_ops->get_lock_pfn(slot->file->f_inode, index, page_level);
+}
+
+static void memfd_put_unlock_pfn(unsigned long pfn)
+{
+ memfd_ops->put_unlock_pfn(pfn);
+}
+
+static struct kvm_private_memory_ops memfd_private_ops = {
+ .get_lock_pfn = memfd_get_lock_pfn,
+ .put_unlock_pfn = memfd_put_unlock_pfn,
+};
+
+int kvm_register_private_memslot(struct kvm *kvm,
+				const struct kvm_userspace_memory_region *mem,
+				struct kvm_memory_slot *slot)
+{
+	/*
+	 * fget() takes a long-term reference on the backing file; it is
+	 * dropped by fput() in kvm_unregister_private_memslot(). fdget()
+	 * would be wrong here, as its reference may be borrowed and must
+	 * not outlive the syscall.
+	 */
+	struct file *file = fget(mem->fd);
+
+	if (!file)
+		return -EINVAL;
+
+	slot->file = file;
+	slot->private_ops = &memfd_private_ops;
+
+	memfd_register_guest(slot->file->f_inode, kvm, &guest_ops, &memfd_ops);
+	return 0;
+}
+
+void kvm_unregister_private_memslot(struct kvm *kvm,
+ const struct kvm_userspace_memory_region *mem,
+ struct kvm_memory_slot *slot)
+{
+ fput(slot->file);
+}
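
For reference, the memfd-side contract consumed by memfd.c can be reconstructed from its call sites. A minimal sketch, assuming the mm/memfd counterpart patch exposes it via <linux/memfd.h>; the names, the const-ness, and the register function's return type are inferred from usage here, not taken from that patch:

	struct guest_ops {
		void (*invalidate_page_range)(struct inode *inode, void *owner,
					      pgoff_t start, pgoff_t end);
	};

	struct guest_mem_ops {
		unsigned long (*get_lock_pfn)(struct inode *inode, pgoff_t offset,
					      int *page_level);
		void (*put_unlock_pfn)(unsigned long pfn);
	};

	/* Return type is not visible in this patch; void is assumed. */
	void memfd_register_guest(struct inode *inode, void *owner,
				  const struct guest_ops *ops,
				  const struct guest_mem_ops **mem_ops);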
@@ -777,6 +777,12 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change);
+int kvm_register_private_memslot(struct kvm *kvm,
+ const struct kvm_userspace_memory_region *mem,
+ struct kvm_memory_slot *slot);
+void kvm_unregister_private_memslot(struct kvm *kvm,
+ const struct kvm_userspace_memory_region *mem,
+ struct kvm_memory_slot *slot);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
@@ -1250,7 +1250,19 @@ static int kvm_set_memslot(struct kvm *kvm,
kvm_arch_flush_shadow_memslot(kvm, slot);
}
+#ifdef KVM_PRIVATE_ADDRESS_SPACE
+ if (change == KVM_MR_CREATE && as_id == KVM_PRIVATE_ADDRESS_SPACE) {
+ r = kvm_register_private_memslot(kvm, mem, new);
+ if (r)
+ goto out_slots;
+ }
+#endif
+
r = kvm_arch_prepare_memory_region(kvm, new, mem, change);
+#ifdef KVM_PRIVATE_ADDRESS_SPACE
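+	/* Undo the registration above on prepare failure, or on deletion. */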
+ if ((r || change == KVM_MR_DELETE) && as_id == KVM_PRIVATE_ADDRESS_SPACE)
+ kvm_unregister_private_memslot(kvm, mem, new);
+#endif
if (r)
goto out_slots;
@@ -1324,10 +1336,15 @@ int __kvm_set_memory_region(struct kvm *kvm,
return -EINVAL;
if (mem->guest_phys_addr & (PAGE_SIZE - 1))
return -EINVAL;
- /* We can read the guest memory with __xxx_user() later on. */
if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
- (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
- !access_ok((void __user *)(unsigned long)mem->userspace_addr,
+ (mem->userspace_addr != untagged_addr(mem->userspace_addr)))
+ return -EINVAL;
+ /* We can read the guest memory with __xxx_user() later on. */
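+	/* Private slots have no userspace mapping, so access_ok() is skipped. */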
+ if (
+#ifdef KVM_PRIVATE_ADDRESS_SPACE
+ as_id != KVM_PRIVATE_ADDRESS_SPACE &&
+#endif
+ !access_ok((void __user *)(unsigned long)mem->userspace_addr,
mem->memory_size))
return -EINVAL;
if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
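
For context, a hedged sketch of the intended userspace flow. Everything below that is not in today's uAPI is an assumption drawn from this patch: the fd field mirrors the mem->fd read in kvm_register_private_memslot(), the private address space id is illustrative, and the high-16-bit slot encoding matches how KVM already derives as_id from mem->slot:

	#define _GNU_SOURCE
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <unistd.h>
	#include <linux/kvm.h>

	/* Assumed id; this patch only shows the KVM_PRIVATE_ADDRESS_SPACE macro. */
	#define KVM_PRIVATE_ADDRESS_SPACE 1

	/* Assumed layout: kvm_userspace_memory_region plus an fd field. */
	struct kvm_private_memory_region {
		uint32_t slot;
		uint32_t flags;
		uint64_t guest_phys_addr;
		uint64_t memory_size;
		uint64_t userspace_addr;	/* file offset for private slots */
		uint32_t fd;
	};

	static int set_private_slot(int vm_fd, uint64_t gpa, uint64_t size)
	{
		struct kvm_private_memory_region region = {
			/* as_id is carried in the high 16 bits of the slot id */
			.slot = (uint32_t)KVM_PRIVATE_ADDRESS_SPACE << 16,
			.guest_phys_addr = gpa,
			.memory_size = size,
			.userspace_addr = 0,	/* offset into the memfd */
		};
		int fd = memfd_create("guest-private", MFD_CLOEXEC);

		if (fd < 0)
			return -1;
		if (ftruncate(fd, size) < 0) {
			close(fd);
			return -1;
		}
		region.fd = (uint32_t)fd;
		return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
	}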
@@ -1368,6 +1385,12 @@ int __kvm_set_memory_region(struct kvm *kvm,
new.dirty_bitmap = NULL;
memset(&new.arch, 0, sizeof(new.arch));
} else { /* Modify an existing slot. */
+#ifdef KVM_PRIVATE_ADDRESS_SPACE
+	/* Private memslots are immutable; they can only be deleted. */
+ if (as_id == KVM_PRIVATE_ADDRESS_SPACE)
+ return -EINVAL;
+#endif
+
if ((new.userspace_addr != old.userspace_addr) ||
(new.npages != old.npages) ||
((new.flags ^ old.flags) & KVM_MEM_READONLY))