@@ -5,8 +5,6 @@
* Copyright (C) 2018, Red Hat, Inc.
*/
-#define _GNU_SOURCE /* for program_invocation_name */
-
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
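
The dropped _GNU_SOURCE define was only needed for program_invocation_name,
a glibc extension that the deleted create_vm() below passed to
kvm_vm_elf_load(); that call now happens inside the library. For reference,
a minimal standalone sketch of the pattern that required the define:

  #define _GNU_SOURCE     /* glibc guards the declaration behind this */
  #include <errno.h>      /* declares program_invocation_name */
  #include <stdio.h>

  int main(void)
  {
          /* The name used to invoke the program, i.e. argv[0]. */
          printf("%s\n", program_invocation_name);
          return 0;
  }
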
@@ -25,6 +23,9 @@
#define VCPU_ID 1
+#define DIRTY_MEM_BITS 30 /* 1G */
+#define DIRTY_MEM_SIZE (1UL << DIRTY_MEM_BITS)
+
/* The memory slot index to track dirty pages */
#define TEST_MEM_SLOT_INDEX 1
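
DIRTY_MEM_SIZE exists so run_test() can hand a byte count straight to
vm_calc_num_guest_pages(), which converts bytes into a mode-adjusted guest
page count. A minimal sketch of that conversion, assuming the library's
usual round-up-then-adjust shape (the exact code lives in kvm_util.c):

  uint64_t vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size)
  {
          /* Round up to whole guest pages; the per-mode page size is
           * assumed to come from the library's vm_guest_mode_params
           * table... */
          unsigned int n = DIV_ROUND_UP(size, vm_guest_mode_params[mode].page_size);

          /* ...then adjust so guest and host page counts tile evenly. */
          return vm_adjust_num_guest_pages(mode, n);
  }
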
@@ -651,27 +652,6 @@ static void vm_dirty_log_verify(enum vm_guest_mode mode, unsigned long *bmap)
}
}
-static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid,
- uint64_t extra_mem_pages, void *guest_code)
-{
- struct kvm_vm *vm;
- uint64_t extra_pg_pages = extra_mem_pages / 512 * 2;
-
- pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
-
- vm = vm_create(mode, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);
- kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
-#ifdef __x86_64__
- vm_create_irqchip(vm);
-#endif
- log_mode_create_vm_done(vm);
- vm_vcpu_add_default(vm, vcpuid, guest_code);
- return vm;
-}
-
-#define DIRTY_MEM_BITS 30 /* 1G */
-#define PAGE_SHIFT_4K 12
-
struct test_params {
unsigned long iterations;
unsigned long interval;
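
The create_vm() helper removed above is subsumed by the library's
vm_create_with_vcpus(), used in the next hunk. A rough sketch of its shape,
grounded in the deleted helper plus a vCPU loop; the real library version
also reserves per-vCPU stack pages and computes page-table overhead per
architecture, so treat this as an approximation:

  struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode,
                                      uint32_t nr_vcpus,
                                      uint64_t extra_mem_pages,
                                      uint32_t num_percpu_pages,
                                      void *guest_code, uint32_t vcpuids[])
  {
          /* Same heuristic page-table reservation as the deleted helper;
           * num_percpu_pages is ignored in this simplified sketch. */
          uint64_t extra_pg_pages = extra_mem_pages / 512 * 2;
          struct kvm_vm *vm;
          int i;

          vm = vm_create(mode, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);
          kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
  #ifdef __x86_64__
          vm_create_irqchip(vm);
  #endif
          for (i = 0; i < nr_vcpus; ++i)
                  vm_vcpu_add_default(vm, vcpuids ? vcpuids[i] : i, guest_code);

          return vm;
  }

Note that log_mode_create_vm_done() is local to this test, which is why
run_test() below still calls it explicitly after the library helper returns.
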
@@ -690,43 +670,39 @@ static void run_test(enum vm_guest_mode mode, void *arg)
return;
}
+ pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
+
/*
- * We reserve page table for 2 times of extra dirty mem which
- * will definitely cover the original (1G+) test range.  Here
- * we do the calculation with 4K page size which is the
- * smallest so the page number will be enough for all archs
- * (e.g., 64K page size guest will need even less memory for
- * page tables).
+ * We reserve page tables for twice the extra dirty memory,
+ * which definitely covers the original (1G+) test range.
 */
- vm = create_vm(mode, VCPU_ID,
- 2ul << (DIRTY_MEM_BITS - PAGE_SHIFT_4K),
- guest_code);
+ vm = vm_create_with_vcpus(mode, 1,
+ vm_calc_num_guest_pages(mode, DIRTY_MEM_SIZE * 2),
+ 0, guest_code, (uint32_t []){ VCPU_ID });
+
+ log_mode_create_vm_done(vm);
guest_page_size = vm_get_page_size(vm);
+ host_page_size = getpagesize();
+
/*
* A little more than 1G of guest page sized pages. Cover the
* case where the size is not aligned to 64 pages.
*/
- guest_num_pages = (1ul << (DIRTY_MEM_BITS -
- vm_get_page_shift(vm))) + 3;
- guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
-
- host_page_size = getpagesize();
+ guest_num_pages = vm_adjust_num_guest_pages(mode,
+ (1ul << (DIRTY_MEM_BITS - vm_get_page_shift(vm))) + 3);
host_num_pages = vm_num_host_pages(mode, guest_num_pages);
if (!p->phys_offset) {
- guest_test_phys_mem = (vm_get_max_gfn(vm) -
- guest_num_pages) * guest_page_size;
+ guest_test_phys_mem = (vm_get_max_gfn(vm) - guest_num_pages) * guest_page_size;
guest_test_phys_mem &= ~(host_page_size - 1);
} else {
guest_test_phys_mem = p->phys_offset;
}
-
#ifdef __s390x__
/* Align to 1M (segment size) */
guest_test_phys_mem &= ~((1 << 20) - 1);
#endif
-
pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);
bmap = bitmap_alloc(host_num_pages);
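
One detail worth calling out in the new vm_create_with_vcpus() call: the
(uint32_t []){ VCPU_ID } argument is a C99 compound literal, an unnamed
one-element array whose address is passed as the vcpuids[] parameter. It is
equivalent to the more verbose:

  uint32_t vcpuids[] = { VCPU_ID };

  vm = vm_create_with_vcpus(mode, 1,
                            vm_calc_num_guest_pages(mode, DIRTY_MEM_SIZE * 2),
                            0, guest_code, vcpuids);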