
[05/11] KVM: selftests: Configure XCR0 to max supported value by default

Message ID 20241003234337.273364-6-seanjc@google.com (mailing list archive)
State New, archived
Series KVM: selftests: AVX support + fixes

Commit Message

Sean Christopherson Oct. 3, 2024, 11:43 p.m. UTC
To play nice with compilers generating AVX instructions, set CR4.OSXSAVE
and configure XCR0 by default when creating selftests vCPUs.  Some distros
have switched gcc to '-march=x86-64-v3' by default, and while it's hard to
find a CPU which doesn't support AVX today, many KVM selftests fail with

  ==== Test Assertion Failure ====
    lib/x86_64/processor.c:570: Unhandled exception in guest
    pid=72747 tid=72747 errno=4 - Interrupted system call
    Unhandled exception '0x6' at guest RIP '0x4104f7'

due to selftests not enabling AVX by default for the guest.  The failure
is easy to reproduce elsewhere with:

   $ make clean && CFLAGS='-march=x86-64-v3' make -j && ./x86_64/kvm_pv_test

E.g. gcc-13 with -march=x86-64-v3 compiles this chunk from selftests'
kvm_fixup_exception():

        regs->rip = regs->r11;
        regs->r9 = regs->vector;
        regs->r10 = regs->error_code;

into this monstrosity (which is clever, but oof):

  405313:       c4 e1 f9 6e c8          vmovq  %rax,%xmm1
  405318:       48 89 68 08             mov    %rbp,0x8(%rax)
  40531c:       48 89 e8                mov    %rbp,%rax
  40531f:       c4 c3 f1 22 c4 01       vpinsrq $0x1,%r12,%xmm1,%xmm0
  405325:       49 89 6d 38             mov    %rbp,0x38(%r13)
  405329:       c5 fa 7f 45 00          vmovdqu %xmm0,0x0(%rbp)

Alternatively, KVM selftests could explicitly restrict the compiler to
-march=x86-64-v2, but odds are very good that punting on AVX enabling will
simply result in tests that "need" AVX doing their own thing, e.g. there
are already three or so additional cleanups that can be done on top.
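
For reference, a test that "needs" AVX would otherwise have to do the
enabling itself in its guest code, e.g. something like the sketch below
(illustration only, assuming the existing get_cr4()/set_cr4()/xgetbv()
helpers and an xsetbv() wrapper; not part of this patch):

        uint64_t supported_xcr0;

        /* Enable XSAVE in the guest, then expose all supported XCR0 bits. */
        set_cr4(get_cr4() | X86_CR4_OSXSAVE);
        supported_xcr0 = this_cpu_supported_xcr0();
        xsetbv(0, supported_xcr0);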

Reported-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Closes: https://lore.kernel.org/all/20240920154422.2890096-1-vkuznets@redhat.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 .../selftests/kvm/include/x86_64/processor.h  |  5 ++++
 .../selftests/kvm/lib/x86_64/processor.c      | 24 +++++++++++++++++++
 .../selftests/kvm/x86_64/xcr0_cpuid_test.c    |  6 ++---
 3 files changed, 32 insertions(+), 3 deletions(-)

Comments

Vitaly Kuznetsov Oct. 4, 2024, 9:01 a.m. UTC | #1
Sean Christopherson <seanjc@google.com> writes:

> To play nice with compilers generating AVX instructions, set CR4.OSXSAVE
> and configure XCR0 by default when creating selftests vCPUs.  Some distros
> have switched gcc to '-march=x86-64-v3' by default, and while it's hard to
> find a CPU which doesn't support AVX today, many KVM selftests fail with
>
>   ==== Test Assertion Failure ====
>     lib/x86_64/processor.c:570: Unhandled exception in guest
>     pid=72747 tid=72747 errno=4 - Interrupted system call
>     Unhandled exception '0x6' at guest RIP '0x4104f7'
>
> due to selftests not enabling AVX by default for the guest.  The failure
> is easy to reproduce elsewhere with:
>
>    $ make clean && CFLAGS='-march=x86-64-v3' make -j && ./x86_64/kvm_pv_test
>
> E.g. gcc-13 with -march=x86-64-v3 compiles this chunk from selftests'
> kvm_fixup_exception():
>
>         regs->rip = regs->r11;
>         regs->r9 = regs->vector;
>         regs->r10 = regs->error_code;
>
> into this monstrosity (which is clever, but oof):
>
>   405313:       c4 e1 f9 6e c8          vmovq  %rax,%xmm1
>   405318:       48 89 68 08             mov    %rbp,0x8(%rax)
>   40531c:       48 89 e8                mov    %rbp,%rax
>   40531f:       c4 c3 f1 22 c4 01       vpinsrq $0x1,%r12,%xmm1,%xmm0
>   405325:       49 89 6d 38             mov    %rbp,0x38(%r13)
>   405329:       c5 fa 7f 45 00          vmovdqu %xmm0,0x0(%rbp)
>
> Alternatively, KVM selftests could explicitly restrict the compiler to
> -march=x86-64-v2, but odds are very good that punting on AVX enabling will
> simply result in tests that "need" AVX doing their own thing, e.g. there
> are already three or so additional cleanups that can be done on top.

Ideally, we may still want to precisely pin the set of instructions
used to generate guest code in selftests, as the environment where
that code runs is defined by us and may not match the host. I can
easily imagine future CPU features leading to similar issues if they
require explicit enablement. To achieve this, we can probably
separate guest code from each test into its own compilation unit.
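
A rough sketch of one way to pin the ISA for guest code, purely
illustrative (the pragma-based approach and the dedicated file are my
assumption, not something implemented in this series):

  /* x86_64/guest_code.c - hypothetical guest-only translation unit */
  #pragma GCC push_options
  #pragma GCC target("no-avx,no-avx2")  /* keep guest code free of AVX codegen */

  static void guest_code(void)
  {
          /* guest-only logic, compiled with a pinned instruction set */
  }

  #pragma GCC pop_options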

>
> Reported-by: Vitaly Kuznetsov <vkuznets@redhat.com>
> Closes: https://lore.kernel.org/all/20240920154422.2890096-1-vkuznets@redhat.com
> Signed-off-by: Sean Christopherson <seanjc@google.com>
> ---
>  .../selftests/kvm/include/x86_64/processor.h  |  5 ++++
>  .../selftests/kvm/lib/x86_64/processor.c      | 24 +++++++++++++++++++
>  .../selftests/kvm/x86_64/xcr0_cpuid_test.c    |  6 ++---
>  3 files changed, 32 insertions(+), 3 deletions(-)
>
> diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
> index e247f99e0473..645200e95f89 100644
> --- a/tools/testing/selftests/kvm/include/x86_64/processor.h
> +++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
> @@ -1049,6 +1049,11 @@ static inline void vcpu_set_cpuid(struct kvm_vcpu *vcpu)
>  	vcpu_ioctl(vcpu, KVM_GET_CPUID2, vcpu->cpuid);
>  }
>  
> +static inline void vcpu_get_cpuid(struct kvm_vcpu *vcpu)
> +{
> +	vcpu_ioctl(vcpu, KVM_GET_CPUID2, vcpu->cpuid);
> +}
> +
>  void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu,
>  			     struct kvm_x86_cpu_property property,
>  			     uint32_t value);
> diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
> index 974bcd2df6d7..636b29ba8985 100644
> --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
> +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
> @@ -506,6 +506,8 @@ static void vcpu_init_sregs(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
>  
>  	sregs.cr0 = X86_CR0_PE | X86_CR0_NE | X86_CR0_PG;
>  	sregs.cr4 |= X86_CR4_PAE | X86_CR4_OSFXSR;
> +	if (kvm_cpu_has(X86_FEATURE_XSAVE))
> +		sregs.cr4 |= X86_CR4_OSXSAVE;
>  	sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX);
>  
>  	kvm_seg_set_unusable(&sregs.ldt);
> @@ -519,6 +521,20 @@ static void vcpu_init_sregs(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
>  	vcpu_sregs_set(vcpu, &sregs);
>  }
>  
> +static void vcpu_init_xcrs(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
> +{
> +	struct kvm_xcrs xcrs = {
> +		.nr_xcrs = 1,
> +		.xcrs[0].xcr = 0,
> +		.xcrs[0].value = kvm_cpu_supported_xcr0(),
> +	};
> +
> +	if (!kvm_cpu_has(X86_FEATURE_XSAVE))
> +		return;
> +
> +	vcpu_xcrs_set(vcpu, &xcrs);
> +}
> +
>  static void set_idt_entry(struct kvm_vm *vm, int vector, unsigned long addr,
>  			  int dpl, unsigned short selector)
>  {
> @@ -675,6 +691,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
>  	vcpu = __vm_vcpu_add(vm, vcpu_id);
>  	vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid());
>  	vcpu_init_sregs(vm, vcpu);
> +	vcpu_init_xcrs(vm, vcpu);
>  
>  	/* Setup guest general purpose registers */
>  	vcpu_regs_get(vcpu, &regs);
> @@ -686,6 +703,13 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
>  	mp_state.mp_state = 0;
>  	vcpu_mp_state_set(vcpu, &mp_state);
>  
> +	/*
> +	 * Refresh CPUID after setting SREGS and XCR0, so that KVM's "runtime"
> +	 * updates to guest CPUID, e.g. for OSXSAVE and XSAVE state size, are
> +	 * reflected into selftests' vCPU CPUID cache, i.e. so that the cache
> +	 * is consistent with vCPU state.
> +	 */
> +	vcpu_get_cpuid(vcpu);
>  	return vcpu;
>  }
>  
> diff --git a/tools/testing/selftests/kvm/x86_64/xcr0_cpuid_test.c b/tools/testing/selftests/kvm/x86_64/xcr0_cpuid_test.c
> index 95ce192d0753..a4aecdc77da5 100644
> --- a/tools/testing/selftests/kvm/x86_64/xcr0_cpuid_test.c
> +++ b/tools/testing/selftests/kvm/x86_64/xcr0_cpuid_test.c
> @@ -48,16 +48,16 @@ do {									\
>  
>  static void guest_code(void)
>  {
> -	uint64_t xcr0_reset;
> +	uint64_t initial_xcr0;
>  	uint64_t supported_xcr0;
>  	int i, vector;
>  
>  	set_cr4(get_cr4() | X86_CR4_OSXSAVE);
>  
> -	xcr0_reset = xgetbv(0);
> +	initial_xcr0 = xgetbv(0);
>  	supported_xcr0 = this_cpu_supported_xcr0();
>  
> -	GUEST_ASSERT(xcr0_reset == XFEATURE_MASK_FP);
> +	GUEST_ASSERT(initial_xcr0 == supported_xcr0);
>  
>  	/* Check AVX */
>  	ASSERT_XFEATURE_DEPENDENCIES(supported_xcr0,

Reviewed-and-tested-by: Vitaly Kuznetsov <vkuznets@redhat.com>

Thanks!
Sean Christopherson Oct. 4, 2024, 1:35 p.m. UTC | #2
On Fri, Oct 04, 2024, Vitaly Kuznetsov wrote:
> Sean Christopherson <seanjc@google.com> writes:
> 
> > To play nice with compilers generating AVX instructions, set CR4.OSXSAVE
> > and configure XCR0 by default when creating selftests vCPUs.  Some distros
> > have switched gcc to '-march=x86-64-v3' by default, and while it's hard to
> > find a CPU which doesn't support AVX today, many KVM selftests fail with
> >
> >   ==== Test Assertion Failure ====
> >     lib/x86_64/processor.c:570: Unhandled exception in guest
> >     pid=72747 tid=72747 errno=4 - Interrupted system call
> >     Unhandled exception '0x6' at guest RIP '0x4104f7'
> >
> > due to selftests not enabling AVX by default for the guest.  The failure
> > is easy to reproduce elsewhere with:
> >
> >    $ make clean && CFLAGS='-march=x86-64-v3' make -j && ./x86_64/kvm_pv_test
> >
> > E.g. gcc-13 with -march=x86-64-v3 compiles this chunk from selftests'
> > kvm_fixup_exception():
> >
> >         regs->rip = regs->r11;
> >         regs->r9 = regs->vector;
> >         regs->r10 = regs->error_code;
> >
> > into this monstrosity (which is clever, but oof):
> >
> >   405313:       c4 e1 f9 6e c8          vmovq  %rax,%xmm1
> >   405318:       48 89 68 08             mov    %rbp,0x8(%rax)
> >   40531c:       48 89 e8                mov    %rbp,%rax
> >   40531f:       c4 c3 f1 22 c4 01       vpinsrq $0x1,%r12,%xmm1,%xmm0
> >   405325:       49 89 6d 38             mov    %rbp,0x38(%r13)
> >   405329:       c5 fa 7f 45 00          vmovdqu %xmm0,0x0(%rbp)
> >
> > Alternatively, KVM selftests could explicitly restrict the compiler to
> > -march=x86-64-v2, but odds are very good that punting on AVX enabling will
> > simply result in tests that "need" AVX doing their own thing, e.g. there
> > are already three or so additional cleanups that can be done on top.
> 
> Ideally, we may still want to precisely pin the set of instructions
> used to generate guest code in selftests, as the environment where
> that code runs is defined by us and may not match the host. I can
> easily imagine future CPU features leading to similar issues if they
> require explicit enablement.

Maybe.  I suspect the cross-section of features that require explicit enablement
*and* will be generated by the compiler for "regular" code will be limited to AVX
and the like.  E.g. the only new feature in -v4 is AVX512.
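
And with XCR0 defaulted to the supported mask, AVX512 state is already
taken care of on CPUs that have it; roughly (mask names assumed from the
selftests headers, shown only as a sketch):

        uint64_t avx512 = XFEATURE_MASK_OPMASK | XFEATURE_MASK_ZMM_Hi256 |
                          XFEATURE_MASK_Hi16_ZMM;

        /* Guest-side check: the AVX512 state bits KVM supports are enabled. */
        GUEST_ASSERT((xgetbv(0) & avx512) ==
                     (this_cpu_supported_xcr0() & avx512));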

> To achieve this, we can probably separate guest code from each test into its
> own compilation unit.

Hopefully we don't need to worry about that for years and years :-)
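
As a usage note, the practical effect for tests is that the vCPU's cached
CPUID is already consistent with the enabled XCR0 right after creation,
e.g. (sketch only, assuming the vm_create_with_one_vcpu() and
vcpu_cpuid_has() helpers behave as in the current selftests):

        struct kvm_vcpu *vcpu;
        struct kvm_vm *vm;

        vm = vm_create_with_one_vcpu(&vcpu, guest_code);

        /* KVM's runtime CPUID update for OSXSAVE is already in the cache. */
        TEST_ASSERT(vcpu_cpuid_has(vcpu, X86_FEATURE_OSXSAVE),
                    "OSXSAVE should be reflected in the cached guest CPUID");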

Patch

diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index e247f99e0473..645200e95f89 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -1049,6 +1049,11 @@  static inline void vcpu_set_cpuid(struct kvm_vcpu *vcpu)
 	vcpu_ioctl(vcpu, KVM_GET_CPUID2, vcpu->cpuid);
 }
 
+static inline void vcpu_get_cpuid(struct kvm_vcpu *vcpu)
+{
+	vcpu_ioctl(vcpu, KVM_GET_CPUID2, vcpu->cpuid);
+}
+
 void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu,
 			     struct kvm_x86_cpu_property property,
 			     uint32_t value);
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index 974bcd2df6d7..636b29ba8985 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -506,6 +506,8 @@  static void vcpu_init_sregs(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
 
 	sregs.cr0 = X86_CR0_PE | X86_CR0_NE | X86_CR0_PG;
 	sregs.cr4 |= X86_CR4_PAE | X86_CR4_OSFXSR;
+	if (kvm_cpu_has(X86_FEATURE_XSAVE))
+		sregs.cr4 |= X86_CR4_OSXSAVE;
 	sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX);
 
 	kvm_seg_set_unusable(&sregs.ldt);
@@ -519,6 +521,20 @@  static void vcpu_init_sregs(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
 	vcpu_sregs_set(vcpu, &sregs);
 }
 
+static void vcpu_init_xcrs(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
+{
+	struct kvm_xcrs xcrs = {
+		.nr_xcrs = 1,
+		.xcrs[0].xcr = 0,
+		.xcrs[0].value = kvm_cpu_supported_xcr0(),
+	};
+
+	if (!kvm_cpu_has(X86_FEATURE_XSAVE))
+		return;
+
+	vcpu_xcrs_set(vcpu, &xcrs);
+}
+
 static void set_idt_entry(struct kvm_vm *vm, int vector, unsigned long addr,
 			  int dpl, unsigned short selector)
 {
@@ -675,6 +691,7 @@  struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
 	vcpu = __vm_vcpu_add(vm, vcpu_id);
 	vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid());
 	vcpu_init_sregs(vm, vcpu);
+	vcpu_init_xcrs(vm, vcpu);
 
 	/* Setup guest general purpose registers */
 	vcpu_regs_get(vcpu, &regs);
@@ -686,6 +703,13 @@  struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
 	mp_state.mp_state = 0;
 	vcpu_mp_state_set(vcpu, &mp_state);
 
+	/*
+	 * Refresh CPUID after setting SREGS and XCR0, so that KVM's "runtime"
+	 * updates to guest CPUID, e.g. for OSXSAVE and XSAVE state size, are
+	 * reflected into selftests' vCPU CPUID cache, i.e. so that the cache
+	 * is consistent with vCPU state.
+	 */
+	vcpu_get_cpuid(vcpu);
 	return vcpu;
 }
 
diff --git a/tools/testing/selftests/kvm/x86_64/xcr0_cpuid_test.c b/tools/testing/selftests/kvm/x86_64/xcr0_cpuid_test.c
index 95ce192d0753..a4aecdc77da5 100644
--- a/tools/testing/selftests/kvm/x86_64/xcr0_cpuid_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xcr0_cpuid_test.c
@@ -48,16 +48,16 @@  do {									\
 
 static void guest_code(void)
 {
-	uint64_t xcr0_reset;
+	uint64_t initial_xcr0;
 	uint64_t supported_xcr0;
 	int i, vector;
 
 	set_cr4(get_cr4() | X86_CR4_OSXSAVE);
 
-	xcr0_reset = xgetbv(0);
+	initial_xcr0 = xgetbv(0);
 	supported_xcr0 = this_cpu_supported_xcr0();
 
-	GUEST_ASSERT(xcr0_reset == XFEATURE_MASK_FP);
+	GUEST_ASSERT(initial_xcr0 == supported_xcr0);
 
 	/* Check AVX */
 	ASSERT_XFEATURE_DEPENDENCIES(supported_xcr0,