@@ -703,6 +703,8 @@ const char *exit_reason_str(unsigned int exit_reason);

vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
uint32_t memslot);
+vm_paddr_t vm_phy_pages_alloc_align(struct kvm_vm *vm, size_t num, size_t align,
+ vm_paddr_t paddr_min, uint32_t memslot);
vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
vm_paddr_t paddr_min, uint32_t memslot);
vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);
@@ -1924,6 +1924,7 @@ const char *exit_reason_str(unsigned int exit_reason)
* Input Args:
* vm - Virtual Machine
* num - number of pages
+ * align - required alignment, in pages (must be a power of two)
* paddr_min - Physical address minimum
* memslot - Memory region to allocate page from
*
@@ -1937,7 +1938,7 @@ const char *exit_reason_str(unsigned int exit_reason)
* and their base address is returned. A TEST_ASSERT failure occurs if
* not enough pages are available at or above paddr_min.
*/
-vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+vm_paddr_t vm_phy_pages_alloc_align(struct kvm_vm *vm, size_t num, size_t align,
vm_paddr_t paddr_min, uint32_t memslot)
{
struct userspace_mem_region *region;
@@ -1951,24 +1952,27 @@ vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
		paddr_min, vm->page_size);

	region = memslot2region(vm, memslot);
- base = pg = paddr_min >> vm->page_shift;
-
- do {
- for (; pg < base + num; ++pg) {
- if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
- base = pg = sparsebit_next_set(region->unused_phy_pages, pg);
- break;
+ base = paddr_min >> vm->page_shift;
+
+again:
+ base = (base + align - 1) & ~(align - 1);
+ for (pg = base; pg < base + num; ++pg) {
+ if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
+ base = sparsebit_next_set(region->unused_phy_pages, pg);
+ if (!base) {
+ fprintf(stderr, "No guest physical pages "
+ "available, paddr_min: 0x%lx "
+ "page_size: 0x%x memslot: %u "
+ "num_pages: %lu align: %lu\n",
+ paddr_min, vm->page_size, memslot,
+ num, align);
+ fputs("---- vm dump ----\n", stderr);
+ vm_dump(stderr, vm, 2);
+				TEST_ASSERT(false, "No guest physical pages available");
+ abort();
}
+ goto again;
}
- } while (pg && pg != base + num);
-
- if (pg == 0) {
- fprintf(stderr, "No guest physical page available, "
- "paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
- paddr_min, vm->page_size, memslot);
- fputs("---- vm dump ----\n", stderr);
- vm_dump(stderr, vm, 2);
- abort();
	}

	for (pg = base; pg < base + num; ++pg)
@@ -1977,6 +1981,12 @@ vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
return base * vm->page_size;
}

+vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+ vm_paddr_t paddr_min, uint32_t memslot)
+{
+ return vm_phy_pages_alloc_align(vm, num, 1, paddr_min, memslot);
+}
+
vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
uint32_t memslot)
{
powerpc will require this to allocate MMU tables in guest memory that are
larger than the guest base page size.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 .../selftests/kvm/include/kvm_util_base.h  |  2 +
 tools/testing/selftests/kvm/lib/kvm_util.c | 44 ++++++++++++-------
 2 files changed, 29 insertions(+), 17 deletions(-)
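
For reference, here is a minimal caller sketch showing how the new helper might
be used, e.g. to place a guest MMU table that spans several base pages on a
natural boundary. This is not part of the patch: alloc_guest_hpt(),
HPT_NUM_PAGES, HPT_ALIGN_PAGES and HPT_MEMSLOT are hypothetical names, while
vm_phy_pages_alloc_align() comes from the change above and
KVM_GUEST_PAGE_TABLE_MIN_PADDR is the existing minimum-GPA constant in the
selftests headers. Note that align is expressed in guest pages and is assumed
to be a power of two, since the allocator rounds up with
(base + align - 1) & ~(align - 1).

#include "kvm_util.h"

#define HPT_NUM_PAGES	16	/* hypothetical: table spans 16 guest base pages */
#define HPT_ALIGN_PAGES	16	/* hypothetical: alignment in pages, power of two */
#define HPT_MEMSLOT	0	/* hypothetical: allocate from the default memslot */

static vm_paddr_t alloc_guest_hpt(struct kvm_vm *vm)
{
	/*
	 * Reserve HPT_NUM_PAGES contiguous guest physical pages whose base
	 * address is aligned to HPT_ALIGN_PAGES pages.  The helper marks the
	 * pages as used in the memslot's unused_phy_pages sparsebit and
	 * returns the guest physical address of the first page.
	 */
	return vm_phy_pages_alloc_align(vm, HPT_NUM_PAGES, HPT_ALIGN_PAGES,
					KVM_GUEST_PAGE_TABLE_MIN_PADDR,
					HPT_MEMSLOT);
}

Passing align == 1 degenerates to the previous behaviour, which is exactly how
the vm_phy_pages_alloc() wrapper in the patch preserves existing callers.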