--- a/include/kvm/kvm.h
+++ b/include/kvm/kvm.h
@@ -87,6 +87,7 @@ struct kvm {

 	struct kvm_config	cfg;
 	int			sys_fd;		/* For system ioctls(), i.e. /dev/kvm */
 	int			vm_fd;		/* For VM ioctls() */
+	int			ram_fd;		/* For guest memory. */
 	timer_t			timerid;	/* Posix timer for interrupts */
 	int			nrcpus;		/* Number of cpus to run */
--- a/include/kvm/util.h
+++ b/include/kvm/util.h
@@ -140,6 +140,9 @@ static inline int pow2_size(unsigned long x)
 }
 struct kvm;
+int memfd_alloc(u64 size, bool hugetlb, u64 blk_size);
+void *mmap_anon_or_hugetlbfs_align(struct kvm *kvm, const char *hugetlbfs_path,
+				   u64 size, u64 align);
 void *mmap_anon_or_hugetlbfs(struct kvm *kvm, const char *hugetlbfs_path, u64 size);
 #endif /* KVM__UTIL_H */
--- a/kvm.c
+++ b/kvm.c
@@ -160,6 +160,7 @@ struct kvm *kvm__new(void)
 	mutex_init(&kvm->mem_banks_lock);
 	kvm->sys_fd = -1;
 	kvm->vm_fd = -1;
+	kvm->ram_fd = -1;
 #ifdef KVM_BRLOCK_DEBUG
 	kvm->brlock_sem = (pthread_rwlock_t) PTHREAD_RWLOCK_INITIALIZER;
@@ -174,6 +175,9 @@ int kvm__exit(struct kvm *kvm)
 	kvm__arch_delete_ram(kvm);
+	if (kvm->ram_fd >= 0)
+		close(kvm->ram_fd);
+
 	list_for_each_entry_safe(bank, tmp, &kvm->mem_banks, list) {
 		list_del(&bank->list);
 		free(bank);
--- a/util/util.c
+++ b/util/util.c
@@ -95,35 +95,36 @@ static u64 get_hugepage_blk_size(const char *hugetlbfs_path)
 	return sfs.f_bsize;
 }
-static void *mmap_hugetlbfs(struct kvm *kvm, const char *hugetlbfs_path, u64 size, u64 blk_size)
+int memfd_alloc(u64 size, bool hugetlb, u64 blk_size)
 {
 	const char *name = "kvmtool";
 	unsigned int flags = 0;
 	int fd;
-	void *addr;
-	if (!is_power_of_two(blk_size))
-		die("Hugepage size must be a power of 2");
+	if (hugetlb) {
+		if (!is_power_of_two(blk_size))
+			die("Hugepage size must be a power of 2");
-	flags |= MFD_HUGETLB;
-	flags |= blk_size << MFD_HUGE_SHIFT;
+		flags |= MFD_HUGETLB;
+		flags |= blk_size << MFD_HUGE_SHIFT;
+	}
 	fd = memfd_create(name, flags);
 	if (fd < 0)
-		die_perror("Can't memfd_create for hugetlbfs map");
+		die_perror("Can't memfd_create for memory map");
+
 	if (ftruncate(fd, size) < 0)
 		die("Can't ftruncate for mem mapping size %lld",
 			(unsigned long long)size);
-	addr = mmap(NULL, size, PROT_RW, MAP_PRIVATE, fd, 0);
-	close(fd);
-	return addr;
+	return fd;
 }
 /* This function wraps the decision between hugetlbfs map (if requested) or normal mmap */
 void *mmap_anon_or_hugetlbfs(struct kvm *kvm, const char *hugetlbfs_path, u64 size)
 {
 	u64 blk_size = 0;
+	int fd;
 	/*
 	 * We don't /need/ to map guest RAM from hugetlbfs, but we do so
@@ -138,9 +139,14 @@ void *mmap_anon_or_hugetlbfs(struct kvm *kvm, const char *hugetlbfs_path, u64 si
 		}
 		kvm->ram_pagesize = blk_size;
-		return mmap_hugetlbfs(kvm, hugetlbfs_path, size, blk_size);
 	} else {
 		kvm->ram_pagesize = getpagesize();
-		return mmap(NULL, size, PROT_RW, MAP_ANON_NORESERVE, -1, 0);
 	}
+
+	fd = memfd_alloc(size, hugetlbfs_path, blk_size);
+	if (fd < 0)
+		return MAP_FAILED;
+
+	kvm->ram_fd = fd;
+	return mmap(NULL, size, PROT_RW, MAP_PRIVATE, kvm->ram_fd, 0);
 }
Allocate all guest RAM backed by memfd/ftruncate instead of anonymous
mmap. This will make it easier to use kvm with fd-based kvm guest memory
proposals [*]. It would also make it easier to use IPC memory sharing,
should that be needed in the future.

Signed-off-by: Fuad Tabba <tabba@google.com>

[*] https://lore.kernel.org/all/20221202061347.1070246-1-chao.p.peng@linux.intel.com/
---
 include/kvm/kvm.h  |  1 +
 include/kvm/util.h |  3 +++
 kvm.c              |  4 ++++
 util/util.c        | 30 ++++++++++++++++++------------
 4 files changed, 26 insertions(+), 12 deletions(-)
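
For reference, the pattern this patch switches to can be exercised outside
kvmtool in a few lines of C. The sketch below is illustrative only: the
name "demo-guest-ram", the 64 MiB size, and the MFD_CLOEXEC flag are
choices made for the example, not taken from the patch. It shows the same
sequence memfd_alloc() and mmap_anon_or_hugetlbfs() now perform: create a
memfd, size it with ftruncate(), map it MAP_PRIVATE, and keep the fd open
alongside the mapping (as kvm->ram_fd is kept until kvm__exit()).

/*
 * Standalone demo of memfd-backed "guest RAM" allocation; not kvmtool code.
 * Needs glibc 2.27+ for memfd_create(). Build: gcc -o memfd-demo memfd-demo.c
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const size_t ram_size = 64UL << 20;	/* 64 MiB, picked arbitrarily */
	int ram_fd;
	void *ram;

	/* Create the backing object. MFD_CLOEXEC is demo hygiene only;
	 * the patch's memfd_alloc() passes only hugetlb-related flags. */
	ram_fd = memfd_create("demo-guest-ram", MFD_CLOEXEC);
	if (ram_fd < 0) {
		perror("memfd_create");
		return 1;
	}

	/* A new memfd has size 0; ftruncate() sets the RAM size. */
	if (ftruncate(ram_fd, ram_size) < 0) {
		perror("ftruncate");
		close(ram_fd);
		return 1;
	}

	/* Map the fd instead of anonymous memory; pages are still faulted
	 * in lazily, but the backing object now has an fd handle. */
	ram = mmap(NULL, ram_size, PROT_READ | PROT_WRITE, MAP_PRIVATE,
		   ram_fd, 0);
	if (ram == MAP_FAILED) {
		perror("mmap");
		close(ram_fd);
		return 1;
	}

	memset(ram, 0, 4096);	/* touch the first page */
	printf("RAM at %p backed by memfd %d\n", ram, ram_fd);

	munmap(ram, ram_size);
	close(ram_fd);	/* kvmtool keeps its ram_fd open until VM teardown */
	return 0;
}

The hugetlbfs case handled by memfd_alloc() (MFD_HUGETLB plus the page-size
value folded into the memfd_create() flags) is omitted to keep the demo
minimal.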