[v2,04/10] KVM: selftests: compute correct demand paging size

Message ID 20210519200339.829146-5-axelrasmussen@google.com (mailing list archive)
State: New
Series: KVM: selftests: exercise userfaultfd minor faults

Commit Message

Axel Rasmussen May 19, 2021, 8:03 p.m. UTC
This is a preparatory commit needed before we can use different kinds of
backing pages for guest memory.

Previously, we used perf_test_args.host_page_size, which is the host's
native page size (commonly 4K). For VM_MEM_SRC_ANONYMOUS this turns out
to be okay, but in a follow-up commit we want to allow using different
kinds of backing memory.

Take VM_MEM_SRC_ANONYMOUS_HUGETLB for example. Without this change,
each UFFDIO_COPY ioctl issued against that backing type would cover
only 4K rather than the full 2M of the backing hugepage, so UFFDIO_COPY
would fail with -EINVAL (__mcopy_atomic_hugetlb checks the size).

Signed-off-by: Axel Rasmussen <axelrasmussen@google.com>
---
 tools/testing/selftests/kvm/demand_paging_test.c | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)
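To make the failure mode concrete, here is a minimal sketch of a
userfaultfd handler servicing a single fault. This is not the selftest's
actual code (resolve_fault, staging, and pagesz are illustrative names);
the point is only that copy.len must match the backing source's page
size:

#include <linux/userfaultfd.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>

/*
 * Illustrative sketch, not the selftest's code: service one fault by
 * copying in a full backing page. pagesz must be the backing source's
 * page size (e.g. 2M for VM_MEM_SRC_ANONYMOUS_HUGETLB); passing the 4K
 * host page size instead makes the ioctl fail with EINVAL, because
 * __mcopy_atomic_hugetlb requires the copy to cover whole hugepages.
 */
static int resolve_fault(int uffd, void *staging, uint64_t addr, size_t pagesz)
{
	struct uffdio_copy copy = {
		.src = (uint64_t)staging,
		.dst = addr & ~((uint64_t)pagesz - 1),	/* align to backing page */
		.len = pagesz,				/* whole backing page, not 4K */
		.mode = 0,
	};

	if (ioctl(uffd, UFFDIO_COPY, &copy) == -1) {
		perror("UFFDIO_COPY");
		return -1;
	}
	return 0;
}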

Comments

Ben Gardon May 19, 2021, 9:51 p.m. UTC | #1
On Wed, May 19, 2021 at 1:03 PM Axel Rasmussen <axelrasmussen@google.com> wrote:
>
> This is a preparatory commit needed before we can use different kinds of
> backing pages for guest memory.
>
> Previously, we used perf_test_args.host_page_size, which is the host's
> native page size (commonly 4K). For VM_MEM_SRC_ANONYMOUS this turns out
> to be okay, but in a follow-up commit we want to allow using different
> kinds of backing memory.
>
> Take VM_MEM_SRC_ANONYMOUS_HUGETLB for example. Without this change,
> each UFFDIO_COPY ioctl issued against that backing type would cover
> only 4K rather than the full 2M of the backing hugepage, so UFFDIO_COPY
> would fail with -EINVAL (__mcopy_atomic_hugetlb checks the size).
>
> Signed-off-by: Axel Rasmussen <axelrasmussen@google.com>

Reviewed-by: Ben Gardon <bgardon@google.com>

Patch

diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c
index 601a1df24dd2..94cf047358d5 100644
--- a/tools/testing/selftests/kvm/demand_paging_test.c
+++ b/tools/testing/selftests/kvm/demand_paging_test.c
@@ -40,6 +40,7 @@
 
 static int nr_vcpus = 1;
 static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
+static size_t demand_paging_size;
 static char *guest_data_prototype;
 
 static void *vcpu_worker(void *data)
@@ -85,7 +86,7 @@ static int handle_uffd_page_request(int uffd, uint64_t addr)
 
 	copy.src = (uint64_t)guest_data_prototype;
 	copy.dst = addr;
-	copy.len = perf_test_args.host_page_size;
+	copy.len = demand_paging_size;
 	copy.mode = 0;
 
 	clock_gettime(CLOCK_MONOTONIC, &start);
@@ -102,7 +103,7 @@ static int handle_uffd_page_request(int uffd, uint64_t addr)
 	PER_PAGE_DEBUG("UFFDIO_COPY %d \t%ld ns\n", tid,
 		       timespec_to_ns(ts_diff));
 	PER_PAGE_DEBUG("Paged in %ld bytes at 0x%lx from thread %d\n",
-		       perf_test_args.host_page_size, addr, tid);
+		       demand_paging_size, addr, tid);
 
 	return 0;
 }
@@ -261,10 +262,12 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 
 	perf_test_args.wr_fract = 1;
 
-	guest_data_prototype = malloc(perf_test_args.host_page_size);
+	demand_paging_size = get_backing_src_pagesz(VM_MEM_SRC_ANONYMOUS);
+
+	guest_data_prototype = malloc(demand_paging_size);
 	TEST_ASSERT(guest_data_prototype,
 		    "Failed to allocate buffer for guest data pattern");
-	memset(guest_data_prototype, 0xAB, perf_test_args.host_page_size);
+	memset(guest_data_prototype, 0xAB, demand_paging_size);
 
 	vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
 	TEST_ASSERT(vcpu_threads, "Memory allocation failed");
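
A note on where the series appears to be going: get_backing_src_pagesz()
reports the page size of a given backing source (the host page size for
VM_MEM_SRC_ANONYMOUS, the hugepage size for VM_MEM_SRC_ANONYMOUS_HUGETLB).
Once a later patch in the series lets the test choose its backing type,
the hard-coded VM_MEM_SRC_ANONYMOUS above presumably becomes that choice.
A hypothetical sketch, where src_type is a made-up variable standing in
for whatever option the later patch adds:

/*
 * Hypothetical follow-up shape; src_type is illustrative, e.g. set from
 * a command-line flag to VM_MEM_SRC_ANONYMOUS_HUGETLB.
 */
static enum vm_mem_backing_src_type src_type = VM_MEM_SRC_ANONYMOUS;

	/* Each UFFDIO_COPY must then cover one page of the chosen source. */
	demand_paging_size = get_backing_src_pagesz(src_type);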