
[RFC,v1,4/4] KVM: selftests: Add bus lock exit test

Message ID 20240709175145.9986-5-manali.shukla@amd.com (mailing list archive)
State New
Series Add support for the Bus Lock Threshold

Commit Message

Manali Shukla July 9, 2024, 5:51 p.m. UTC
From: Nikunj A Dadhania <nikunj@amd.com>

Malicious guests can cause bus locks to degrade the performance of
a system.  The Bus Lock Threshold feature allows hypervisors to
restrict the ability of guests to perform excessive bus locks and
slow down the system for all tenants.

Add a test case to verify the Bus Lock Threshold feature for SVM.

[Manali:
  - The KVM_CAP_X86_BUS_LOCK_EXIT capability is not enabled when vCPUs
    are created, so the VM and vCPU creation logic was changed to
    resolve this.
  - Added a nested guest test case for bus lock exits.
  - Massaged the commit message.
  - Misc cleanups. ]

Signed-off-by: Nikunj A Dadhania <nikunj@amd.com>
Co-developed-by: Manali Shukla <manali.shukla@amd.com>
Signed-off-by: Manali Shukla <manali.shukla@amd.com>
---
 tools/testing/selftests/kvm/Makefile          |   1 +
 .../selftests/kvm/x86_64/svm_buslock_test.c   | 114 ++++++++++++++++++
 2 files changed, 115 insertions(+)
 create mode 100644 tools/testing/selftests/kvm/x86_64/svm_buslock_test.c

Comments

Sean Christopherson Aug. 16, 2024, 8:21 p.m. UTC | #1
On Tue, Jul 09, 2024, Manali Shukla wrote:
> From: Nikunj A Dadhania <nikunj@amd.com>
> 
> Malicious guests can cause bus locks to degrade the performance of
> a system.  The Bus Lock Threshold feature is beneficial for
> hypervisors aiming to restrict the ability of the guests to perform
> excessive bus locks and slow down the system for all the tenants.
> 
> Add a test case to verify the Bus Lock Threshold feature for SVM.
> 
> [Manali:
>   - The KVM_CAP_X86_BUS_LOCK_EXIT capability is not enabled while
>     vcpus are created, changed the VM and vCPU creation logic to
>     resolve the mentioned issue.
>   - Added nested guest test case for bus lock exit.
>   - massage commit message.
>   - misc cleanups. ]

Again, 99% of the changelog is boilerplate that does nothing to help me
understand what the test actually does.

> 
> Signed-off-by: Nikunj A Dadhania <nikunj@amd.com>
> Co-developed-by: Manali Shukla <manali.shukla@amd.com>
> Signed-off-by: Manali Shukla <manali.shukla@amd.com>
> ---
>  tools/testing/selftests/kvm/Makefile          |   1 +
>  .../selftests/kvm/x86_64/svm_buslock_test.c   | 114 ++++++++++++++++++
>  2 files changed, 115 insertions(+)
>  create mode 100644 tools/testing/selftests/kvm/x86_64/svm_buslock_test.c
> 
> diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
> index ce8ff8e8ce3a..711ec195e386 100644
> --- a/tools/testing/selftests/kvm/Makefile
> +++ b/tools/testing/selftests/kvm/Makefile
> @@ -94,6 +94,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/smaller_maxphyaddr_emulation_test
>  TEST_GEN_PROGS_x86_64 += x86_64/smm_test
>  TEST_GEN_PROGS_x86_64 += x86_64/state_test
>  TEST_GEN_PROGS_x86_64 += x86_64/vmx_preemption_timer_test
> +TEST_GEN_PROGS_x86_64 += x86_64/svm_buslock_test
>  TEST_GEN_PROGS_x86_64 += x86_64/svm_vmcall_test
>  TEST_GEN_PROGS_x86_64 += x86_64/svm_int_ctl_test
>  TEST_GEN_PROGS_x86_64 += x86_64/svm_nested_shutdown_test
> diff --git a/tools/testing/selftests/kvm/x86_64/svm_buslock_test.c b/tools/testing/selftests/kvm/x86_64/svm_buslock_test.c
> new file mode 100644
> index 000000000000..dcb595999046
> --- /dev/null
> +++ b/tools/testing/selftests/kvm/x86_64/svm_buslock_test.c

I would *very* strongly prefer to have a bus lock test that is common to VMX
and SVM.  For L1, there's no unique behavior.  And for L2, assuming we don't
support nested bus lock enabling, the only vendor specific bits are launching
L2.

I.e. writing this so it works on both VMX and SVM should be quite straightforward.
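
For illustration, a minimal sketch of how the guest side could stay
vendor-neutral.  svm_run_l2() and vmx_run_l2() are hypothetical wrappers
(around generic_svm_setup()/run_guest() and prepare_for_vmx_operation()/
vmlaunch() respectively), and passing the nested state as a generic
pointer is likewise an assumption, not existing selftest code;
l2_guest_code() is the one from the posted patch:

static void guest_code(void *nested_test_data)
{
	/* The L1 workload is identical on VMX and SVM. */
	buslock_add();

	/* Only launching L2 is vendor specific. */
	if (this_cpu_has(X86_FEATURE_SVM))
		svm_run_l2(nested_test_data, l2_guest_code);
	else if (this_cpu_has(X86_FEATURE_VMX))
		vmx_run_l2(nested_test_data, l2_guest_code);

	GUEST_DONE();
}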

> @@ -0,0 +1,114 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +/*
> + * svm_buslock_test
> + *
> + * Copyright (C) 2024 Advanced Micro Devices, Inc.
> + *
> + * SVM testing: Buslock exit

Keep the Copyright, ditch everything else.

> + */
> +
> +#include "test_util.h"
> +#include "kvm_util.h"
> +#include "processor.h"
> +#include "svm_util.h"
> +
> +#define NO_ITERATIONS 100

Heh, NR_ITERATIONS.

> +#define __cacheline_aligned __aligned(128)

Eh, I would just split a page; that's about as future-proof as we can get in
terms of cache line sizes.
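
For illustration, a rough sketch of the page-split idea (the buffer and
names below are made up, and NR_ITERATIONS is the rename suggested above):

#define BUSLOCK_PAGE_SIZE 4096

/* Two pages, page aligned, so an access can straddle the boundary. */
static uint8_t buslock_buf[2 * BUSLOCK_PAGE_SIZE] __aligned(BUSLOCK_PAGE_SIZE);

static void buslock_add(void)
{
	/*
	 * Deliberately misaligned: the 4-byte locked add spans the page
	 * boundary, and therefore a cache line boundary, no matter what
	 * the cache line size is.
	 */
	atomic_long_t *val = (atomic_long_t *)&buslock_buf[BUSLOCK_PAGE_SIZE - 2];

	for (int i = 0; i < NR_ITERATIONS; i++)
		buslock_atomic_add(2, val);
}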

> +
> +struct buslock_test {
> +	unsigned char pad[126];
> +	atomic_long_t val;
> +} __packed;
> +
> +struct buslock_test test __cacheline_aligned;
> +
> +static __always_inline void buslock_atomic_add(int i, atomic_long_t *v)
> +{
> +	asm volatile(LOCK_PREFIX "addl %1,%0"
> +		     : "+m" (v->counter)
> +		     : "ir" (i) : "memory");
> +}
> +
> +static void buslock_add(void)
> +{
> +	/*
> +	 * Increment a cache unaligned variable atomically.
> +	 * This should generate a bus lock exit.

So... this test doesn't actually verify that a bus lock exit occurs.  The userspace
side will eat an exit if one occurs, but there's literally not a single TEST_ASSERT()
in here.
Manali Shukla Aug. 26, 2024, 10:29 a.m. UTC | #2
Hi Sean,
Thank you for reviewing my changes.

On 8/17/2024 1:51 AM, Sean Christopherson wrote:
> On Tue, Jul 09, 2024, Manali Shukla wrote:
>> From: Nikunj A Dadhania <nikunj@amd.com>
>>
>> Malicious guests can cause bus locks to degrade the performance of
>> a system.  The Bus Lock Threshold feature is beneficial for
>> hypervisors aiming to restrict the ability of the guests to perform
>> excessive bus locks and slow down the system for all the tenants.
>>
>> Add a test case to verify the Bus Lock Threshold feature for SVM.
>>
>> [Manali:
>>   - The KVM_CAP_X86_BUS_LOCK_EXIT capability is not enabled while
>>     vcpus are created, changed the VM and vCPU creation logic to
>>     resolve the mentioned issue.
>>   - Added nested guest test case for bus lock exit.
>>   - massage commit message.
>>   - misc cleanups. ]
> 
> Again, 99% of the changelog is boilerplate that does nothing to help me
> understand what the test actually does.
> 

Sure. I will rewrite the commit messages for all the patches.

>>
>> Signed-off-by: Nikunj A Dadhania <nikunj@amd.com>
>> Co-developed-by: Manali Shukla <manali.shukla@amd.com>
>> Signed-off-by: Manali Shukla <manali.shukla@amd.com>
>> ---
>>  tools/testing/selftests/kvm/Makefile          |   1 +
>>  .../selftests/kvm/x86_64/svm_buslock_test.c   | 114 ++++++++++++++++++
>>  2 files changed, 115 insertions(+)
>>  create mode 100644 tools/testing/selftests/kvm/x86_64/svm_buslock_test.c
>>
>> diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
>> index ce8ff8e8ce3a..711ec195e386 100644
>> --- a/tools/testing/selftests/kvm/Makefile
>> +++ b/tools/testing/selftests/kvm/Makefile
>> @@ -94,6 +94,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/smaller_maxphyaddr_emulation_test
>>  TEST_GEN_PROGS_x86_64 += x86_64/smm_test
>>  TEST_GEN_PROGS_x86_64 += x86_64/state_test
>>  TEST_GEN_PROGS_x86_64 += x86_64/vmx_preemption_timer_test
>> +TEST_GEN_PROGS_x86_64 += x86_64/svm_buslock_test
>>  TEST_GEN_PROGS_x86_64 += x86_64/svm_vmcall_test
>>  TEST_GEN_PROGS_x86_64 += x86_64/svm_int_ctl_test
>>  TEST_GEN_PROGS_x86_64 += x86_64/svm_nested_shutdown_test
>> diff --git a/tools/testing/selftests/kvm/x86_64/svm_buslock_test.c b/tools/testing/selftests/kvm/x86_64/svm_buslock_test.c
>> new file mode 100644
>> index 000000000000..dcb595999046
>> --- /dev/null
>> +++ b/tools/testing/selftests/kvm/x86_64/svm_buslock_test.c
> 
> I would *very* strongly prefer to have a bus lock test that is common to VMX
> and SVM.  For L1, there's no unique behavior.  And for L2, assuming we don't
> support nested bus lock enabling, the only vendor specific bits are launching
> L2.
> 
> I.e. writing this so it works on both VMX and SVM should be quite straightforward.
> 

Sure, I will try to write a common test for SVM and VMX.

>> @@ -0,0 +1,114 @@
>> +// SPDX-License-Identifier: GPL-2.0-only
>> +/*
>> + * svm_buslock_test
>> + *
>> + * Copyright (C) 2024 Advanced Micro Devices, Inc.
>> + *
>> + * SVM testing: Buslock exit
> 
> Keep the Copyright, ditch everything else.

Sure.

> 
>> + */
>> +
>> +#include "test_util.h"
>> +#include "kvm_util.h"
>> +#include "processor.h"
>> +#include "svm_util.h"
>> +
>> +#define NO_ITERATIONS 100
> 
> Heh, NR_ITERATIONS.

Ack.

> 
>> +#define __cacheline_aligned __aligned(128)
> 
> Eh, I would just split a page, that's about as future proof as we can get in
> terms of cache line sizes.
> 

Sure.

>> +
>> +struct buslock_test {
>> +	unsigned char pad[126];
>> +	atomic_long_t val;
>> +} __packed;
>> +
>> +struct buslock_test test __cacheline_aligned;
>> +
>> +static __always_inline void buslock_atomic_add(int i, atomic_long_t *v)
>> +{
>> +	asm volatile(LOCK_PREFIX "addl %1,%0"
>> +		     : "+m" (v->counter)
>> +		     : "ir" (i) : "memory");
>> +}
>> +
>> +static void buslock_add(void)
>> +{
>> +	/*
>> +	 * Increment a cache unaligned variable atomically.
>> +	 * This should generate a bus lock exit.
> 
> So... this test doesn't actually verify that a bus lock exit occurs.  The userspace
> side will eat an exit if one occurs, but there's literally not a single TEST_ASSERT()
> in here.

Agreed. How about doing the following?

+       for (;;) {
+               struct ucall uc;
+
+               vcpu_run(vcpu);
+
+               if (run->exit_reason == KVM_EXIT_IO) {
+                       switch (get_ucall(vcpu, &uc)) {
+                       case UCALL_ABORT:
+                               REPORT_GUEST_ASSERT(uc);
+                               /* NOT REACHED */
+                       case UCALL_SYNC:
+                               break;
+                       case UCALL_DONE:
+                               goto done;
+                       default:
+                               TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
+                       }
+               }
+
+               TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_X86_BUS_LOCK);
+               TEST_ASSERT_EQ(run->flags, KVM_RUN_X86_BUS_LOCK);
+               run->flags &= ~KVM_RUN_X86_BUS_LOCK;
+               run->exit_reason = 0;
+       }

- Manali
Sean Christopherson Aug. 26, 2024, 4:06 p.m. UTC | #3
On Mon, Aug 26, 2024, Manali Shukla wrote:
> >> +struct buslock_test {
> >> +	unsigned char pad[126];
> >> +	atomic_long_t val;
> >> +} __packed;
> >> +
> >> +struct buslock_test test __cacheline_aligned;
> >> +
> >> +static __always_inline void buslock_atomic_add(int i, atomic_long_t *v)
> >> +{
> >> +	asm volatile(LOCK_PREFIX "addl %1,%0"
> >> +		     : "+m" (v->counter)
> >> +		     : "ir" (i) : "memory");
> >> +}
> >> +
> >> +static void buslock_add(void)
> >> +{
> >> +	/*
> >> +	 * Increment a cache unaligned variable atomically.
> >> +	 * This should generate a bus lock exit.
> > 
> > So... this test doesn't actually verify that a bus lock exit occurs.  The userspace
> > side will eat an exit if one occurs, but there's literally not a single TEST_ASSERT()
> > in here.
> 
> Agreed, How about doing following?
> 
> +       for (;;) {
> +               struct ucall uc;
> +
> +               vcpu_run(vcpu);
> +
> +               if (run->exit_reason == KVM_EXIT_IO) {
> +                       switch (get_ucall(vcpu, &uc)) {
> +                       case UCALL_ABORT:
> +                               REPORT_GUEST_ASSERT(uc);
> +                               /* NOT REACHED */
> +                       case UCALL_SYNC:
> +                               break;
> +                       case UCALL_DONE:
> +                               goto done;
> +                       default:
> +                               TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
> +                       }
> +               }
> +
> +               TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_X86_BUS_LOCK);

I doubt this works, as the UCALL_SYNC case above will fall through to this assert.
I assume the UCALL_SYNC case needs a continue so run->exit_reason is re-checked.

> +               TEST_ASSERT_EQ(run->flags, KVM_RUN_X86_BUS_LOCK);
> +               run->flags &= ~KVM_RUN_X86_BUS_LOCK;

No need, KVM should clear the flag if the exit isn't due to a bus lock.

> +               run->exit_reason = 0;

Again, no need, KVM should take care of resetting exit_reason.

> +       }
> 
> - Manali
> 
>
Manali Shukla Aug. 29, 2024, 9:41 a.m. UTC | #4
Hi Sean,

Thank you for reviewing my patches.

On 8/26/2024 9:36 PM, Sean Christopherson wrote:
> On Mon, Aug 26, 2024, Manali Shukla wrote:
>>>> +struct buslock_test {
>>>> +	unsigned char pad[126];
>>>> +	atomic_long_t val;
>>>> +} __packed;
>>>> +
>>>> +struct buslock_test test __cacheline_aligned;
>>>> +
>>>> +static __always_inline void buslock_atomic_add(int i, atomic_long_t *v)
>>>> +{
>>>> +	asm volatile(LOCK_PREFIX "addl %1,%0"
>>>> +		     : "+m" (v->counter)
>>>> +		     : "ir" (i) : "memory");
>>>> +}
>>>> +
>>>> +static void buslock_add(void)
>>>> +{
>>>> +	/*
>>>> +	 * Increment a cache unaligned variable atomically.
>>>> +	 * This should generate a bus lock exit.
>>>
>>> So... this test doesn't actually verify that a bus lock exit occurs.  The userspace
>>> side will eat an exit if one occurs, but there's literally not a single TEST_ASSERT()
>>> in here.
>>
>> Agreed, How about doing following?
>>
>> +       for (;;) {
>> +               struct ucall uc;
>> +
>> +               vcpu_run(vcpu);
>> +
>> +               if (run->exit_reason == KVM_EXIT_IO) {
>> +                       switch (get_ucall(vcpu, &uc)) {
>> +                       case UCALL_ABORT:
>> +                               REPORT_GUEST_ASSERT(uc);
>> +                               /* NOT REACHED */
>> +                       case UCALL_SYNC:
>> +                               break;
>> +                       case UCALL_DONE:
>> +                               goto done;
>> +                       default:
>> +                               TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
>> +                       }
>> +               }
>> +
>> +               TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_X86_BUS_LOCK);
> 
> I doubt this works, the UCALL_SYNC above will fallthrough to this assert.  I
> assume run->exit_reason needs a continue for UCALL_SYNC.
>

I agree, there should be a continue for UCALL_SYNC in place of the break. I will
correct it in v2.

I didn't observe this issue because UCALL_SYNC is only invoked when GUEST_SYNC()
is called from the guest code. Since GUEST_SYNC() is not present in the guest
code used in the bus lock test case, UCALL_SYNC was never triggered.
 
>> +               TEST_ASSERT_EQ(run->flags, KVM_RUN_X86_BUS_LOCK);
>> +               run->flags &= ~KVM_RUN_X86_BUS_LOCK;
>
> No need, KVM should clear the flag if the exit isn't due to a bus lock.

Sure, I will remove this.

> 
>> +               run->exit_reason = 0;
> 
> Again, no need, KVM should take care of resetting exit_reason.

Ack.

> 
>> +       }
>>

- Manali
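
Putting the feedback above together, one possible shape for the v2 run loop.
This is a sketch only: the bus_lock_exits counter and the final assertion are
assumptions about what v2 might do, and "run" is vcpu->run as in the posted
test:

uint64_t bus_lock_exits = 0;

for (;;) {
	struct ucall uc;

	vcpu_run(vcpu);

	if (run->exit_reason == KVM_EXIT_X86_BUS_LOCK) {
		/* KVM sets the flag only for bus lock exits; no manual clearing. */
		TEST_ASSERT_EQ(run->flags & KVM_RUN_X86_BUS_LOCK,
			       KVM_RUN_X86_BUS_LOCK);
		bus_lock_exits++;
		continue;
	}

	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

	switch (get_ucall(vcpu, &uc)) {
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT(uc);
		/* NOT REACHED */
	case UCALL_SYNC:
		continue;
	case UCALL_DONE:
		goto done;
	default:
		TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
	}
}
done:
	TEST_ASSERT(bus_lock_exits, "Expected at least one bus lock exit");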

Patch

diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index ce8ff8e8ce3a..711ec195e386 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -94,6 +94,7 @@  TEST_GEN_PROGS_x86_64 += x86_64/smaller_maxphyaddr_emulation_test
 TEST_GEN_PROGS_x86_64 += x86_64/smm_test
 TEST_GEN_PROGS_x86_64 += x86_64/state_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_preemption_timer_test
+TEST_GEN_PROGS_x86_64 += x86_64/svm_buslock_test
 TEST_GEN_PROGS_x86_64 += x86_64/svm_vmcall_test
 TEST_GEN_PROGS_x86_64 += x86_64/svm_int_ctl_test
 TEST_GEN_PROGS_x86_64 += x86_64/svm_nested_shutdown_test
diff --git a/tools/testing/selftests/kvm/x86_64/svm_buslock_test.c b/tools/testing/selftests/kvm/x86_64/svm_buslock_test.c
new file mode 100644
index 000000000000..dcb595999046
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/svm_buslock_test.c
@@ -0,0 +1,114 @@ 
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * svm_buslock_test
+ *
+ * Copyright (C) 2024 Advanced Micro Devices, Inc.
+ *
+ * SVM testing: Buslock exit
+ */
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "svm_util.h"
+
+#define NO_ITERATIONS 100
+#define __cacheline_aligned __aligned(128)
+
+struct buslock_test {
+	unsigned char pad[126];
+	atomic_long_t val;
+} __packed;
+
+struct buslock_test test __cacheline_aligned;
+
+static __always_inline void buslock_atomic_add(int i, atomic_long_t *v)
+{
+	asm volatile(LOCK_PREFIX "addl %1,%0"
+		     : "+m" (v->counter)
+		     : "ir" (i) : "memory");
+}
+
+static void buslock_add(void)
+{
+	/*
+	 * Increment a cache unaligned variable atomically.
+	 * This should generate a bus lock exit.
+	 */
+	for (int i = 0; i < NO_ITERATIONS; i++)
+		buslock_atomic_add(2, &test.val);
+}
+
+static void l2_guest_code(void)
+{
+	buslock_add();
+	GUEST_DONE();
+}
+
+static void l1_guest_code(struct svm_test_data *svm)
+{
+	#define L2_GUEST_STACK_SIZE 64
+	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+	struct vmcb *vmcb = svm->vmcb;
+
+	generic_svm_setup(svm, l2_guest_code,
+			  &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+	run_guest(vmcb, svm->vmcb_gpa);
+	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
+	GUEST_DONE();
+}
+
+static void guest_code(struct svm_test_data *svm)
+{
+	buslock_add();
+
+	if (this_cpu_has(X86_FEATURE_SVM))
+		l1_guest_code(svm);
+}
+
+int main(int argc, char *argv[])
+{
+	struct kvm_vcpu *vcpu;
+	struct kvm_run *run;
+	struct kvm_vm *vm;
+	vm_vaddr_t svm_gva;
+
+	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));
+	TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_BUS_LOCK_EXIT));
+
+	vm = vm_create(1);
+	vm_enable_cap(vm, KVM_CAP_X86_BUS_LOCK_EXIT, KVM_BUS_LOCK_DETECTION_EXIT);
+	vcpu = vm_vcpu_add(vm, 0, guest_code);
+
+	vcpu_alloc_svm(vm, &svm_gva);
+	vcpu_args_set(vcpu, 1, svm_gva);
+
+	run = vcpu->run;
+
+	for (;;) {
+		struct ucall uc;
+
+		vcpu_run(vcpu);
+
+		if (run->exit_reason == KVM_EXIT_X86_BUS_LOCK) {
+			run->flags &= ~KVM_RUN_X86_BUS_LOCK;
+			run->exit_reason = 0;
+			continue;
+		}
+
+		switch (get_ucall(vcpu, &uc)) {
+		case UCALL_ABORT:
+			REPORT_GUEST_ASSERT(uc);
+			/* NOT REACHED */
+		case UCALL_SYNC:
+			break;
+		case UCALL_DONE:
+			goto done;
+		default:
+			TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
+		}
+	}
+done:
+	kvm_vm_free(vm);
+	return 0;
+}