Message ID | 20231005143839.365297-8-thuth@redhat.com (mailing list archive)
---|---
State | New
Series | Use TAP in some more x86 KVM selftests
On Thu, Oct 05, 2023, Thomas Huth wrote:
> Use the kselftest_harness.h interface in this test to get TAP
> output, so that it is easier for the user to see what the test
> is doing.
>
> Note: We're not using the KVM_ONE_VCPU_TEST() macro here (but the
> generic TEST() macro from kselftest_harness.h) since each of the
> tests needs a different guest code function.

I would much rather we add a KVM framework that can deal with this, i.e. build
something that is flexible from the get-go. Allowing tests to set the entry point
after vCPU creation is fairly straightforward (patch below, compile tested only on x86).

With that, my vote would be to have KVM_ONE_VCPU_TEST_SUITE() *always* pass NULL
for the entry point, and instead always require sub-tests to pass the guest code
to KVM_ONE_VCPU_TEST(). I think having the sub-test explicitly specify its guest
code will be helpful for developers reading the code.

And maybe waaay down the road if we can get all tests converted to the framework,
we can drop @guest_code from vm_create_with_one_vcpu() entirely.

Apologies for the horrifically slow review, I got waylaid by non-upstream stuff
for almost all of November=>January. :-(

---
From: Sean Christopherson <seanjc@google.com>
Date: Fri, 26 Jan 2024 11:15:13 -0800
Subject: [PATCH] KVM: selftests: Move setting a vCPU's entry point to a dedicated API

Extract the code to set a vCPU's entry point out of vm_arch_vcpu_add() and
into a new API, vcpu_arch_set_entry_point(). Providing a separate API will
allow creating a KVM selftests harness that can handle tests that use
different entry points for sub-tests, whereas *requiring* the entry point to
be specified at vCPU creation makes it difficult to create a generic harness,
e.g. the boilerplate setup/teardown can't easily create and destroy the VM
and vCPUs.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 .../selftests/kvm/include/kvm_util_base.h     | 11 +++++----
 .../selftests/kvm/lib/aarch64/processor.c     | 23 ++++++++++++++-----
 .../selftests/kvm/lib/riscv/processor.c       |  9 +++++---
 .../selftests/kvm/lib/s390x/processor.c       | 13 ++++++-----
 .../selftests/kvm/lib/x86_64/processor.c      | 13 ++++++++---
 5 files changed, 47 insertions(+), 22 deletions(-)

diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h
index 9e5afc472c14..a6e7738a8db7 100644
--- a/tools/testing/selftests/kvm/include/kvm_util_base.h
+++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
@@ -969,15 +969,18 @@ static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu,
  * Input Args:
  *   vm - Virtual Machine
  *   vcpu_id - The id of the VCPU to add to the VM.
- *   guest_code - The vCPU's entry point
  */
-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
-				  void *guest_code);
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
+void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code);
 
 static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
 					   void *guest_code)
 {
-	return vm_arch_vcpu_add(vm, vcpu_id, guest_code);
+	struct kvm_vcpu *vcpu = vm_arch_vcpu_add(vm, vcpu_id);
+
+	vcpu_arch_set_entry_point(vcpu, guest_code);
+
+	return vcpu;
 }
 
 /* Re-create a vCPU after restarting a VM, e.g. for state save/restore tests. */
diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c
index 43b9a7283360..ed4ab29f4fad 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/processor.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c
@@ -365,8 +365,13 @@ void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
 		indent, "", pstate, pc);
 }
 
-struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
-				  struct kvm_vcpu_init *init, void *guest_code)
+void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
+{
+	vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
+}
+
+static struct kvm_vcpu *__aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+					   struct kvm_vcpu_init *init)
 {
 	size_t stack_size;
 	uint64_t stack_vaddr;
@@ -381,15 +386,21 @@ struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
 	aarch64_vcpu_setup(vcpu, init);
 
 	vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size);
-	vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
+}
+
+struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+				  struct kvm_vcpu_init *init, void *guest_code)
+{
+	struct kvm_vcpu *vcpu = __aarch64_vcpu_add(vm, vcpu_id, init);
+
+	vcpu_arch_set_entry_point(vcpu, guest_code);
 
 	return vcpu;
 }
 
-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
-				  void *guest_code)
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
 {
-	return aarch64_vcpu_add(vm, vcpu_id, NULL, guest_code);
+	return __aarch64_vcpu_add(vm, vcpu_id, NULL);
 }
 
 void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
diff --git a/tools/testing/selftests/kvm/lib/riscv/processor.c b/tools/testing/selftests/kvm/lib/riscv/processor.c
index 2bb33a8ac03c..6d5ef6ed0234 100644
--- a/tools/testing/selftests/kvm/lib/riscv/processor.c
+++ b/tools/testing/selftests/kvm/lib/riscv/processor.c
@@ -277,8 +277,12 @@ static void __aligned(16) guest_unexp_trap(void)
 		  0, 0, 0, 0, 0, 0);
 }
 
-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
-				  void *guest_code)
+void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
+{
+	vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.pc), (unsigned long)guest_code);
+}
+
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
 {
 	int r;
 	size_t stack_size;
@@ -312,7 +316,6 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
 
 	/* Setup stack pointer and program counter of guest */
 	vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.sp), stack_vaddr + stack_size);
-	vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.pc), (unsigned long)guest_code);
 
 	/* Setup default exception vector of guest */
 	vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(stvec), (unsigned long)guest_unexp_trap);
diff --git a/tools/testing/selftests/kvm/lib/s390x/processor.c b/tools/testing/selftests/kvm/lib/s390x/processor.c
index f6d227892cbc..4ad4492eea1d 100644
--- a/tools/testing/selftests/kvm/lib/s390x/processor.c
+++ b/tools/testing/selftests/kvm/lib/s390x/processor.c
@@ -155,15 +155,18 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 	virt_dump_region(stream, vm, indent, vm->pgd);
 }
 
-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
-				  void *guest_code)
+void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
+{
+	vcpu->run->psw_addr = (uintptr_t)guest_code;
+}
+
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
 {
 	size_t stack_size = DEFAULT_STACK_PGS * getpagesize();
 	uint64_t stack_vaddr;
 	struct kvm_regs regs;
 	struct kvm_sregs sregs;
 	struct kvm_vcpu *vcpu;
-	struct kvm_run *run;
 
 	TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
 		    vm->page_size);
@@ -184,9 +187,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
 	sregs.crs[1] = vm->pgd | 0xf;	/* Primary region table */
 	vcpu_sregs_set(vcpu, &sregs);
 
-	run = vcpu->run;
-	run->psw_mask = 0x0400000180000000ULL;  /* DAT enabled + 64 bit mode */
-	run->psw_addr = (uintptr_t)guest_code;
+	vcpu->run->psw_mask = 0x0400000180000000ULL;  /* DAT enabled + 64 bit mode */
 
 	return vcpu;
 }
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index 4bc52948447d..18dfabc1c6e7 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -562,8 +562,16 @@ void kvm_arch_vm_post_create(struct kvm_vm *vm)
 	sync_global_to_guest(vm, host_cpu_is_amd);
 }
 
-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
-				  void *guest_code)
+void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
+{
+	struct kvm_regs regs;
+
+	vcpu_regs_get(vcpu, &regs);
+	regs.rip = (unsigned long) guest_code;
+	vcpu_regs_set(vcpu, &regs);
+}
+
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
 {
 	struct kvm_mp_state mp_state;
 	struct kvm_regs regs;
@@ -597,7 +605,6 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
 	vcpu_regs_get(vcpu, &regs);
 	regs.rflags = regs.rflags | 0x2;
 	regs.rsp = stack_vaddr;
-	regs.rip = (unsigned long) guest_code;
 	vcpu_regs_set(vcpu, &regs);
 
 	/* Setup the MP state */

base-commit: e19ec6e3e05fa223f05c72806028dfa531dbd0ae
--
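As a rough, hypothetical sketch of how the split API above could be used (this is not part of the posted patch): generic setup creates the VM and vCPU without any entry point, and each sub-test installs its own guest code via vcpu_arch_set_entry_point() just before running. The guest function guest_code_a() and the direct call to vm_arch_vcpu_add() are illustrative assumptions.

```c
#include "kvm_util.h"

/* Illustrative guest function; a real sub-test would do actual work here. */
static void guest_code_a(void)
{
	GUEST_DONE();
}

int main(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	/* Boilerplate setup: the vCPU is created without an entry point. */
	vm = vm_create(1);
	vcpu = vm_arch_vcpu_add(vm, 0);

	/* A sub-test supplies its own guest code right before running it. */
	vcpu_arch_set_entry_point(vcpu, guest_code_a);
	vcpu_run(vcpu);

	kvm_vm_free(vm);
	return 0;
}
```

The point of the split is visible in the last few lines: the setup/teardown half never needs to know which guest function a given sub-test will run.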
On 26/01/2024 20.32, Sean Christopherson wrote:
> On Thu, Oct 05, 2023, Thomas Huth wrote:
>> Use the kselftest_harness.h interface in this test to get TAP
>> output, so that it is easier for the user to see what the test
>> is doing.
>>
>> Note: We're not using the KVM_ONE_VCPU_TEST() macro here (but the
>> generic TEST() macro from kselftest_harness.h) since each of the
>> tests needs a different guest code function.
>
> I would much rather we add a KVM framework that can deal with this, i.e. build
> something that is flexible from the get-go. Allowing tests to set the entry point
> after vCPU creation is fairly straightforward (patch below, compile tested only on x86).
>
> With that, my vote would be to have KVM_ONE_VCPU_TEST_SUITE() *always* pass NULL
> for the entry point, and instead always require sub-tests to pass the guest code
> to KVM_ONE_VCPU_TEST(). I think having the sub-test explicitly specify its guest
> code will be helpful for developers reading the code.

Yes, I agree that sounds quite a bit nicer. I'll give it a try...

 Thomas
diff --git a/tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c b/tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c
index 3533dc2fbfeeb..9843528bba0c6 100644
--- a/tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c
+++ b/tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c
@@ -8,6 +8,7 @@
 #define _GNU_SOURCE /* for program_invocation_short_name */
 #include <sys/ioctl.h>
 
+#include "kselftest_harness.h"
 #include "test_util.h"
 #include "kvm_util.h"
 #include "vmx.h"
@@ -527,7 +528,7 @@ static void run_guest_then_process_ucall_done(struct kvm_vcpu *vcpu)
 	process_ucall_done(vcpu);
 }
 
-static void test_msr_filter_allow(void)
+TEST(msr_filter_allow)
 {
 	struct kvm_vcpu *vcpu;
 	struct kvm_vm *vm;
@@ -646,7 +647,7 @@ static void handle_wrmsr(struct kvm_run *run)
 	}
 }
 
-static void test_msr_filter_deny(void)
+TEST(msr_filter_deny)
 {
 	struct kvm_vcpu *vcpu;
 	struct kvm_vm *vm;
@@ -693,7 +694,7 @@ static void test_msr_filter_deny(void)
 	kvm_vm_free(vm);
 }
 
-static void test_msr_permission_bitmap(void)
+TEST(msr_permission_bitmap)
 {
 	struct kvm_vcpu *vcpu;
 	struct kvm_vm *vm;
@@ -786,7 +787,7 @@ static void run_msr_filter_flag_test(struct kvm_vm *vm)
 }
 
 /* Test that attempts to write to the unused bits in a flag fails. */
-static void test_user_exit_msr_flags(void)
+TEST(user_exit_msr_flags)
 {
 	struct kvm_vcpu *vcpu;
 	struct kvm_vm *vm;
@@ -804,13 +805,5 @@ static void test_user_exit_msr_flags(void)
 
 int main(int argc, char *argv[])
 {
-	test_msr_filter_allow();
-
-	test_msr_filter_deny();
-
-	test_msr_permission_bitmap();
-
-	test_user_exit_msr_flags();
-
-	return 0;
+	return test_harness_run(argc, argv);
 }
Use the kselftest_harness.h interface in this test to get TAP
output, so that it is easier for the user to see what the test
is doing.

Note: We're not using the KVM_ONE_VCPU_TEST() macro here (but the
generic TEST() macro from kselftest_harness.h) since each of the
tests needs a different guest code function.

Signed-off-by: Thomas Huth <thuth@redhat.com>
---
 .../kvm/x86_64/userspace_msr_exit_test.c | 19 ++++++-------------
 1 file changed, 6 insertions(+), 13 deletions(-)
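For readers unfamiliar with kselftest_harness.h, here is a minimal standalone sketch of the pattern the conversion relies on; the test name example_subtest and the trivial assertion are made up for illustration. Each TEST() body is registered automatically, and test_harness_run() executes all registered tests and reports the results as TAP output.

```c
#include "kselftest_harness.h"

/* Each TEST() becomes one test point in the TAP output. */
TEST(example_subtest)
{
	ASSERT_EQ(2, 1 + 1);
}

int main(int argc, char *argv[])
{
	/* Runs every registered TEST() and prints TAP results. */
	return test_harness_run(argc, argv);
}
```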