
[11/20] KVM: selftests: Post to sem_vcpu_stop if and only if vcpu_stop is true

Message ID 20241214010721.2356923-12-seanjc@google.com (mailing list archive)
State New
Series [01/20] KVM: selftests: Support multiple write retries in dirty_log_test

Commit Message

Sean Christopherson Dec. 14, 2024, 1:07 a.m. UTC
When running dirty_log_test using the dirty ring, post to sem_vcpu_stop
only when the main thread has explicitly requested that the vCPU stop.
Synchronizing the vCPU and main thread whenever the dirty ring happens to
be full is unnecessary, as KVM's ABI is to actively prevent the vCPU from
running until the ring is no longer full.  I.e. attempting to run the vCPU
will simply result in KVM_EXIT_DIRTY_RING_FULL without ever entering the
guest.  And if KVM doesn't exit, e.g. lets the vCPU dirty more pages,
then that's a KVM bug worth finding.

Posting to sem_vcpu_stop on ring full also makes it difficult to get the
test logic right, e.g. it's easy to let the vCPU keep running when it
shouldn't, as the ring can fill at essentially any time.

Opportunistically rework the handling of dirty_ring_vcpu_ring_full to
leave it set for the remainder of the iteration in order to simplify the
surrounding logic.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 tools/testing/selftests/kvm/dirty_log_test.c | 18 ++++--------------
 1 file changed, 4 insertions(+), 14 deletions(-)
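
For reference, the stop/continue handshake the changelog relies on presumably
boils down to the following, a reconstruction based on the patch description
and the names visible in the diff (vcpu_stop, sem_vcpu_stop, sem_vcpu_cont),
not code quoted from the series:

static void vcpu_handle_sync_stop(void)
{
	/*
	 * Post to sem_vcpu_stop if and only if the main thread has
	 * explicitly requested that the vCPU stop; otherwise return and
	 * let the vCPU keep running.
	 */
	if (READ_ONCE(vcpu_stop)) {
		sem_post(&sem_vcpu_stop);
		sem_wait(&sem_vcpu_cont);
	}
}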

Comments

Maxim Levitsky Dec. 18, 2024, 12:01 a.m. UTC | #1
On Fri, 2024-12-13 at 17:07 -0800, Sean Christopherson wrote:
> When running dirty_log_test using the dirty ring, post to sem_vcpu_stop
> only when the main thread has explicitly requested that the vCPU stop.
> Synchronizing the vCPU and main thread whenever the dirty ring happens to
> be full is unnecessary, as KVM's ABI is to actively prevent the vCPU from
> running until the ring is no longer full.  I.e. attempting to run the vCPU
> will simply result in KVM_EXIT_DIRTY_RING_FULL without ever entering the
> guest.  And if KVM doesn't exit, e.g. lets the vCPU dirty more pages,
> then that's a KVM bug worth finding.

This is probably a good idea in principle, but it can also reduce coverage,
because now the vCPU will pointlessly re-enter and exit KVM_RUN while the
dirty ring is full (see the sketch below).

Best regards,
	Maxim Levitsky
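
A minimal sketch of the behavior in question (an assumed, condensed shape of
the test's vCPU worker loop, not code from the series): while the ring is
full and uncollected, each KVM_RUN attempt returns immediately with
KVM_EXIT_DIRTY_RING_FULL, so the worker spins on run attempts instead of
sleeping on a semaphore.

	while (!READ_ONCE(host_quit)) {
		/*
		 * Returns immediately with KVM_EXIT_DIRTY_RING_FULL while
		 * the ring is full, without ever entering the guest.
		 */
		vcpu_run(vcpu);

		/*
		 * Records the ring-full state; with this patch, the vCPU
		 * parks itself only if a stop was explicitly requested.
		 */
		dirty_ring_after_vcpu_run(vcpu);
	}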



Patch

diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index 40c8f5551c8e..8544e8425f9c 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -379,12 +379,8 @@ static void dirty_ring_after_vcpu_run(struct kvm_vcpu *vcpu)
 	if (get_ucall(vcpu, NULL) == UCALL_SYNC) {
 		vcpu_handle_sync_stop();
 	} else if (run->exit_reason == KVM_EXIT_DIRTY_RING_FULL) {
-		/* Update the flag first before pause */
 		WRITE_ONCE(dirty_ring_vcpu_ring_full, true);
-		sem_post(&sem_vcpu_stop);
-		pr_info("Dirty ring full, waiting for it to be collected\n");
-		sem_wait(&sem_vcpu_cont);
-		WRITE_ONCE(dirty_ring_vcpu_ring_full, false);
+		vcpu_handle_sync_stop();
 	} else {
 		TEST_ASSERT(false, "Invalid guest sync status: "
 			    "exit_reason=%s",
@@ -743,7 +739,6 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	pthread_create(&vcpu_thread, NULL, vcpu_worker, vcpu);
 
 	while (iteration < p->iterations) {
-		bool saw_dirty_ring_full = false;
 		unsigned long i;
 
 		dirty_ring_prev_iteration_last_page = dirty_ring_last_page;
@@ -775,19 +770,12 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 			 * the ring on every pass would make it unlikely the
			 * vCPU would ever fill the ring).
 			 */
-			if (READ_ONCE(dirty_ring_vcpu_ring_full))
-				saw_dirty_ring_full = true;
-			if (i && !saw_dirty_ring_full)
+			if (i && !READ_ONCE(dirty_ring_vcpu_ring_full))
 				continue;
 
 			log_mode_collect_dirty_pages(vcpu, TEST_MEM_SLOT_INDEX,
 						     bmap, host_num_pages,
 						     &ring_buf_idx);
-
-			if (READ_ONCE(dirty_ring_vcpu_ring_full)) {
-				pr_info("Dirty ring emptied, restarting vCPU\n");
-				sem_post(&sem_vcpu_cont);
-			}
 		}
 
 		/*
@@ -829,6 +817,8 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 			WRITE_ONCE(host_quit, true);
 		sync_global_to_guest(vm, iteration);
 
+		WRITE_ONCE(dirty_ring_vcpu_ring_full, false);
+
 		sem_post(&sem_vcpu_cont);
 	}
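
For readers stitching the hunks together, the resulting per-iteration logic
in run_test() reduces to roughly the following (a condensed sketch, with
nr_collection_passes standing in for the real loop bound):

	for (i = 0; i < nr_collection_passes; i++) {
		/*
		 * Always collect on the first pass; on later passes, collect
		 * only if the vCPU filled the ring, as draining the ring on
		 * every pass would make a ring-full unlikely to ever occur.
		 */
		if (i && !READ_ONCE(dirty_ring_vcpu_ring_full))
			continue;

		log_mode_collect_dirty_pages(vcpu, TEST_MEM_SLOT_INDEX,
					     bmap, host_num_pages,
					     &ring_buf_idx);
	}

	/*
	 * The ring-full flag stays set for the remainder of the iteration
	 * and is cleared only here, right before the vCPU is released.
	 */
	WRITE_ONCE(dirty_ring_vcpu_ring_full, false);
	sem_post(&sem_vcpu_cont);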