@@ -37,17 +37,17 @@ void kvm__init_ram(struct kvm *kvm)
* 2M trumps 64K, so let's go with that.
*/
kvm->ram_size = kvm->cfg.ram_size;
- kvm->arch.ram_alloc_size = kvm->ram_size + SZ_2M;
- kvm->arch.ram_alloc_start = mmap_anon_or_hugetlbfs(kvm,
+ kvm->arch.ram_alloc_size = kvm->ram_size;
+ kvm->arch.ram_alloc_start = mmap_anon_or_hugetlbfs_align(kvm,
kvm->cfg.hugetlbfs_path,
- kvm->arch.ram_alloc_size);
+ kvm->arch.ram_alloc_size,
+ SZ_2M);
if (kvm->arch.ram_alloc_start == MAP_FAILED)
die("Failed to map %lld bytes for guest memory (%d)",
kvm->arch.ram_alloc_size, errno);
- kvm->ram_start = (void *)ALIGN((unsigned long)kvm->arch.ram_alloc_start,
- SZ_2M);
+ kvm->ram_start = kvm->arch.ram_alloc_start;
madvise(kvm->arch.ram_alloc_start, kvm->arch.ram_alloc_size,
MADV_MERGEABLE);
@@ -70,17 +70,17 @@ void kvm__arch_init(struct kvm *kvm)
* 2M trumps 64K, so let's go with that.
*/
kvm->ram_size = min(kvm->cfg.ram_size, (u64)RISCV_MAX_MEMORY(kvm));
- kvm->arch.ram_alloc_size = kvm->ram_size + SZ_2M;
- kvm->arch.ram_alloc_start = mmap_anon_or_hugetlbfs(kvm,
+ kvm->arch.ram_alloc_size = kvm->ram_size;
+ kvm->arch.ram_alloc_start = mmap_anon_or_hugetlbfs_align(kvm,
kvm->cfg.hugetlbfs_path,
- kvm->arch.ram_alloc_size);
+ kvm->arch.ram_alloc_size,
+ SZ_2M);
if (kvm->arch.ram_alloc_start == MAP_FAILED)
die("Failed to map %lld bytes for guest memory (%d)",
kvm->arch.ram_alloc_size, errno);
- kvm->ram_start = (void *)ALIGN((unsigned long)kvm->arch.ram_alloc_start,
- SZ_2M);
+ kvm->ram_start = kvm->arch.ram_alloc_start;
madvise(kvm->arch.ram_alloc_start, kvm->arch.ram_alloc_size,
MADV_MERGEABLE);
Use the new mmap_anon_or_hugetlbfs_align() to allocate memory aligned
as needed instead of doing it at the caller while allocating and
mapping more than needed.

Signed-off-by: Fuad Tabba <tabba@google.com>
---
 arm/kvm.c   | 10 +++++-----
 riscv/kvm.c | 10 +++++-----
 2 files changed, 10 insertions(+), 10 deletions(-)