
[03/13] fixup! KVM: selftests: Support multiple vCPUs in demand paging test

Message ID 20200214145920.30792-4-drjones@redhat.com (mailing list archive)
State New, archived
Series KVM: selftests: Various fixes and cleanups

Commit Message

Andrew Jones Feb. 14, 2020, 2:59 p.m. UTC
[guest_code() can't return, use GUEST_ASSERT(). Ensure the number
 of guest pages is compatible with the host.]
Signed-off-by: Andrew Jones <drjones@redhat.com>
---
 tools/testing/selftests/kvm/demand_paging_test.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

Comments

Ben Gardon Feb. 18, 2020, 5:39 p.m. UTC | #1
On Fri, Feb 14, 2020 at 6:59 AM Andrew Jones <drjones@redhat.com> wrote:
>
> [guest_code() can't return, use GUEST_ASSERT(). Ensure the number
>  of guest pages is compatible with the host.]
> Signed-off-by: Andrew Jones <drjones@redhat.com>
> ---
>  tools/testing/selftests/kvm/demand_paging_test.c | 11 ++++++++---
>  1 file changed, 8 insertions(+), 3 deletions(-)
>
> diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c
> index ec8860b70129..2e6e3db8418a 100644
> --- a/tools/testing/selftests/kvm/demand_paging_test.c
> +++ b/tools/testing/selftests/kvm/demand_paging_test.c
> @@ -115,9 +115,8 @@ static void guest_code(uint32_t vcpu_id)
>         uint64_t pages;
>         int i;
>
> -       /* Return to signal error if vCPU args data structure is courrupt. */
> -       if (vcpu_args[vcpu_id].vcpu_id != vcpu_id)
> -               return;
> +       /* Make sure vCPU args data structure is not corrupt. */
> +       GUEST_ASSERT(vcpu_args[vcpu_id].vcpu_id == vcpu_id);
>
>         gva = vcpu_args[vcpu_id].gva;
>         pages = vcpu_args[vcpu_id].pages;
> @@ -186,6 +185,12 @@ static struct kvm_vm *create_vm(enum vm_guest_mode mode, int vcpus,
>         pages += ((2 * vcpus * vcpu_memory_bytes) >> PAGE_SHIFT_4K) /
>                  PTES_PER_4K_PT;
>
> +       /*
> +        * If the host is uing 64K pages, then we need the number of 4K
s/uing/using

> +        * guest pages to be a multiple of 16.
> +        */
> +       pages += 16 - pages % 16;
> +
Could we use some derivative of getpagesize() here instead?
e.g.
ASSERT(getpagesize() >= (1 << PAGE_SHIFT_4K));
ASSERT(!(getpagesize() % (1 << PAGE_SHIFT_4K)));
pages_4k_per_host_page = getpagesize() / (1 << PAGE_SHIFT_4K);
pages += pages_4k_per_host_page - pages % pages_4k_per_host_page;
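
A more complete, standalone sketch of that rounding idea (the helper name,
the PAGE_SHIFT_4K value, and the use of plain assert() are illustrative
assumptions rather than the selftest's actual API; it also skips the
adjustment when the count is already aligned, unlike the unconditional
"pages += 16 - pages % 16" in the patch):

#include <assert.h>
#include <stdint.h>
#include <unistd.h>

#define PAGE_SHIFT_4K 12	/* assumed: shift for a 4K guest page */

/* Round a count of 4K guest pages up to a whole number of host pages. */
static uint64_t round_up_to_host_pages(uint64_t pages_4k)
{
	uint64_t host_page_size = getpagesize();
	uint64_t pages_4k_per_host_page;

	assert(host_page_size >= (1UL << PAGE_SHIFT_4K));
	assert(host_page_size % (1UL << PAGE_SHIFT_4K) == 0);

	pages_4k_per_host_page = host_page_size / (1UL << PAGE_SHIFT_4K);
	if (pages_4k % pages_4k_per_host_page)
		pages_4k += pages_4k_per_host_page -
			    pages_4k % pages_4k_per_host_page;

	return pages_4k;
}

On a 64K-page host this rounds e.g. 100 4K guest pages up to 112, a multiple
of 16, matching the intent of the hard-coded constant in the patch while
deriving the factor from the actual host page size.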

>         vm = _vm_create(mode, pages, O_RDWR);
>         kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
>  #ifdef __x86_64__
> --
> 2.21.1
>

Patch

diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c
index ec8860b70129..2e6e3db8418a 100644
--- a/tools/testing/selftests/kvm/demand_paging_test.c
+++ b/tools/testing/selftests/kvm/demand_paging_test.c
@@ -115,9 +115,8 @@  static void guest_code(uint32_t vcpu_id)
 	uint64_t pages;
 	int i;
 
-	/* Return to signal error if vCPU args data structure is courrupt. */
-	if (vcpu_args[vcpu_id].vcpu_id != vcpu_id)
-		return;
+	/* Make sure vCPU args data structure is not corrupt. */
+	GUEST_ASSERT(vcpu_args[vcpu_id].vcpu_id == vcpu_id);
 
 	gva = vcpu_args[vcpu_id].gva;
 	pages = vcpu_args[vcpu_id].pages;
@@ -186,6 +185,12 @@  static struct kvm_vm *create_vm(enum vm_guest_mode mode, int vcpus,
 	pages += ((2 * vcpus * vcpu_memory_bytes) >> PAGE_SHIFT_4K) /
 		 PTES_PER_4K_PT;
 
+	/*
+	 * If the host is uing 64K pages, then we need the number of 4K
+	 * guest pages to be a multiple of 16.
+	 */
+	pages += 16 - pages % 16;
+
 	vm = _vm_create(mode, pages, O_RDWR);
 	kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
 #ifdef __x86_64__