
[v2,3/3] drm/amdgpu: Trigger a wedged event for every type of reset

Message ID 20250220162750.343139-4-andrealmeid@igalia.com (mailing list archive)
State New
Series drm/amdgpu: Small reset improvements

Commit Message

André Almeida Feb. 20, 2025, 4:27 p.m. UTC
Instead of only triggering a wedged event for complete GPU resets,
trigger one for all reset types, such as soft resets and ring resets.
Regardless of the reset type, it's useful for userspace to know that it
happened, because the kernel will reject further submissions from that
app.

Signed-off-by: André Almeida <andrealmeid@igalia.com>
---
v2: Keep the wedged event in amdgpu_device_gpu_recover() and add an
    extra check to avoid triggering two events.
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 20 +++++++++++++-------
 1 file changed, 13 insertions(+), 7 deletions(-)
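
For context, userspace sees this as a udev "change" event on the DRM
device. Below is a minimal listener sketch using libudev (an editorial
illustration, not part of the patch; it assumes the uevent carries a
WEDGED=<method> property, e.g. WEDGED=none for DRM_WEDGE_RECOVERY_NONE,
as described in the wedged-event series). Build with:
cc -o drm-wedged drm-wedged.c -ludev

#include <libudev.h>
#include <poll.h>
#include <stdio.h>

int main(void)
{
	struct udev *udev = udev_new();
	struct udev_monitor *mon;
	struct pollfd pfd;

	if (!udev)
		return 1;

	/* Listen for post-processed ("udev") events from the drm subsystem */
	mon = udev_monitor_new_from_netlink(udev, "udev");
	if (!mon)
		return 1;
	udev_monitor_filter_add_match_subsystem_devtype(mon, "drm", NULL);
	udev_monitor_enable_receiving(mon);

	/* The monitor fd is non-blocking by default, so wait with poll() */
	pfd.fd = udev_monitor_get_fd(mon);
	pfd.events = POLLIN;

	while (poll(&pfd, 1, -1) > 0) {
		struct udev_device *dev = udev_monitor_receive_device(mon);
		const char *wedged;

		if (!dev)
			continue;
		wedged = udev_device_get_property_value(dev, "WEDGED");
		if (wedged)
			printf("%s wedged, recovery hint: %s\n",
			       udev_device_get_sysname(dev), wedged);
		udev_device_unref(dev);
	}

	udev_monitor_unref(mon);
	udev_unref(udev);
	return 0;
}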

Comments

Christian König Feb. 21, 2025, 7:56 a.m. UTC | #1
On 20.02.25 17:27, André Almeida wrote:
> Instead of only triggering a wedged event for complete GPU resets,
> trigger one for all reset types, such as soft resets and ring resets.
> Regardless of the reset type, it's useful for userspace to know that it
> happened, because the kernel will reject further submissions from that
> app.
>
> Signed-off-by: André Almeida <andrealmeid@igalia.com>
> ---
> v2: Keep the wedged event in amdgpu_device_gpu_recover() and add an
>     extra check to avoid triggering two events.
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 20 +++++++++++++-------
>  1 file changed, 13 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> index 698e5799e542..9948ea33d2c6 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> @@ -91,8 +91,8 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
>  	struct amdgpu_job *job = to_amdgpu_job(s_job);
>  	struct amdgpu_task_info *ti;
>  	struct amdgpu_device *adev = ring->adev;
> -	int idx;
> -	int r;
> +	bool gpu_recover = false;
> +	int idx, ret = 0;

We usually stick to "r" as the name for the return variable in amdgpu.

>  
>  	if (!drm_dev_enter(adev_to_drm(adev), &idx)) {
>  		dev_info(adev->dev, "%s - device unplugged skipping recovery on scheduler:%s",
> @@ -141,8 +141,8 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
>  		 * we'll fall back to full GPU reset.
>  		 */
>  		drm_sched_wqueue_stop(&ring->sched);
> -		r = amdgpu_ring_reset(ring, job->vmid);
> -		if (!r) {
> +		ret = amdgpu_ring_reset(ring, job->vmid);
> +		if (!ret) {
>  			if (amdgpu_ring_sched_ready(ring))
>  				drm_sched_stop(&ring->sched, s_job);
>  			atomic_inc(&ring->adev->gpu_reset_counter);
> @@ -170,9 +170,11 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
>  		 */
>  		set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
>  
> -		r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
> -		if (r)
> -			dev_err(adev->dev, "GPU Recovery Failed: %d\n", r);
> +		ret = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
> +		if (ret)
> +			dev_err(adev->dev, "GPU Recovery Failed: %d\n", ret);
> +		else
> +			gpu_recover = true;
>  	} else {
>  		drm_sched_suspend_timeout(&ring->sched);
>  		if (amdgpu_sriov_vf(adev))
> @@ -180,6 +182,10 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
>  	}
>  
>  exit:
> +	/* Avoid sending two wedge events for the same reset */
> +	if (!ret && !gpu_recover)

Ugh, that's rather ugly, I think.

Probably better to just add an extra drm_dev_wedged_event() after the amdgpu_ring_reset() call.

Soft recovery should probably never send a wedged event in the first place, and we plan to remove it anyway once queue reset works reliably.

Regards,
Christian.

> +		drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE);
> +
>  	drm_dev_exit(idx);
>  	return DRM_GPU_SCHED_STAT_NOMINAL;
>  }

Patch

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 698e5799e542..9948ea33d2c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -91,8 +91,8 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
 	struct amdgpu_job *job = to_amdgpu_job(s_job);
 	struct amdgpu_task_info *ti;
 	struct amdgpu_device *adev = ring->adev;
-	int idx;
-	int r;
+	bool gpu_recover = false;
+	int idx, ret = 0;
 
 	if (!drm_dev_enter(adev_to_drm(adev), &idx)) {
 		dev_info(adev->dev, "%s - device unplugged skipping recovery on scheduler:%s",
@@ -141,8 +141,8 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
 		 * we'll fall back to full GPU reset.
 		 */
 		drm_sched_wqueue_stop(&ring->sched);
-		r = amdgpu_ring_reset(ring, job->vmid);
-		if (!r) {
+		ret = amdgpu_ring_reset(ring, job->vmid);
+		if (!ret) {
 			if (amdgpu_ring_sched_ready(ring))
 				drm_sched_stop(&ring->sched, s_job);
 			atomic_inc(&ring->adev->gpu_reset_counter);
@@ -170,9 +170,11 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
 		 */
 		set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
 
-		r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
-		if (r)
-			dev_err(adev->dev, "GPU Recovery Failed: %d\n", r);
+		ret = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
+		if (ret)
+			dev_err(adev->dev, "GPU Recovery Failed: %d\n", ret);
+		else
+			gpu_recover = true;
 	} else {
 		drm_sched_suspend_timeout(&ring->sched);
 		if (amdgpu_sriov_vf(adev))
@@ -180,6 +182,10 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
 	}
 
 exit:
+	/* Avoid sending two wedge events for the same reset */
+	if (!ret && !gpu_recover)
+		drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE);
+
 	drm_dev_exit(idx);
 	return DRM_GPU_SCHED_STAT_NOMINAL;
 }