drm/radeon: fix virtual memory locking in case of reset

Message ID 1343923542-830-1-git-send-email-j.glisse@gmail.com (mailing list archive)
State New, archived

Commit Message

Jerome Glisse Aug. 2, 2012, 4:05 p.m. UTC
From: Jerome Glisse <jglisse@redhat.com>

Lock/unlock the mutexes in the proper order to avoid a deadlock
when a GPU reset is triggered from the VM code path.

Cc: stable@vger.kernel.org [3.5]
Signed-off-by: Jerome Glisse <jglisse@redhat.com>
---
 drivers/gpu/drm/radeon/radeon_gart.c |   11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)
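
For context on the first hunk: radeon_vm_unbind_locked() runs with
rdev->vm_manager.lock held (and, with this patch, vm->mutex as well),
while the reset path needs to take those same locks itself; that
recursion is presumably the deadlock the commit message refers to. The
fix drops both mutexes before resetting and retakes them afterwards in
a canonical order, manager lock first, per-VM mutex second. A
standalone userspace sketch of the pattern, using pthreads and made-up
names rather than the radeon code:

/* Illustrative only: the drop-locks-before-reset pattern from the
 * first hunk, with pthread mutexes standing in for kernel mutexes. */
#include <errno.h>
#include <pthread.h>

static pthread_mutex_t manager_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t vm_mutex = PTHREAD_MUTEX_INITIALIZER;
static int hung = 1;

/* Stand-in fence wait: pretend the first wait hits a GPU hang. */
static int fence_wait(void)
{
	return hung ? -EDEADLK : 0;
}

/* Stand-in reset: takes both locks itself, so it must be called
 * with neither of them held or it deadlocks on its caller. */
static int gpu_reset(void)
{
	pthread_mutex_lock(&manager_lock);
	pthread_mutex_lock(&vm_mutex);
	hung = 0;		/* ... rebind VMs, restart rings ... */
	pthread_mutex_unlock(&vm_mutex);
	pthread_mutex_unlock(&manager_lock);
	return 0;
}

/* Called with manager_lock and vm_mutex already held, like
 * radeon_vm_unbind_locked(). */
static void vm_unbind_locked(void)
{
	for (;;) {
		int r = fence_wait();
		if (r != -EDEADLK)
			break;
		/* Drop both locks so the reset can take them, then
		 * retake them in the canonical order afterwards. */
		pthread_mutex_unlock(&manager_lock);
		pthread_mutex_unlock(&vm_mutex);
		r = gpu_reset();
		pthread_mutex_lock(&manager_lock);
		pthread_mutex_lock(&vm_mutex);
		if (r)
			break;	/* reset failed, give up */
	}
}

int main(void)
{
	pthread_mutex_lock(&manager_lock);
	pthread_mutex_lock(&vm_mutex);
	vm_unbind_locked();
	pthread_mutex_unlock(&vm_mutex);
	pthread_mutex_unlock(&manager_lock);
	return 0;
}

The unlock order above mirrors the patch; only the reacquisition order
matters for deadlock avoidance, since releasing a held mutex never
blocks.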

Comments

Alex Deucher Aug. 2, 2012, 4:27 p.m. UTC | #1
On Thu, Aug 2, 2012 at 12:05 PM,  <j.glisse@gmail.com> wrote:
> From: Jerome Glisse <jglisse@redhat.com>
>
> Lock/unlock the mutexes in the proper order to avoid a deadlock
> when a GPU reset is triggered from the VM code path.
>
> Cc: stable@vger.kernel.org [3.5]
> Signed-off-by: Jerome Glisse <jglisse@redhat.com>

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>


Patch

diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index b372005..7eabb59 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -508,14 +508,19 @@ static void radeon_vm_unbind_locked(struct radeon_device *rdev,
 	while (vm->fence) {
 		int r;
 		r = radeon_fence_wait(vm->fence, false);
-		if (r)
+		if (r) {
 			DRM_ERROR("error while waiting for fence: %d\n", r);
+		}
 		if (r == -EDEADLK) {
+			/* release mutex and lock in right order */
 			mutex_unlock(&rdev->vm_manager.lock);
+			mutex_unlock(&vm->mutex);
 			r = radeon_gpu_reset(rdev);
 			mutex_lock(&rdev->vm_manager.lock);
-			if (!r)
+			mutex_lock(&vm->mutex);
+			if (!r) {
 				continue;
+			}
 		}
 		break;
 	}
@@ -551,7 +556,9 @@ void radeon_vm_manager_fini(struct radeon_device *rdev)
 	mutex_lock(&rdev->vm_manager.lock);
 	/* unbind all active vm */
 	list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
+		mutex_lock(&vm->mutex);
 		radeon_vm_unbind_locked(rdev, vm);
+		mutex_unlock(&vm->mutex);
 	}
 	rdev->vm_manager.funcs->fini(rdev);
 	mutex_unlock(&rdev->vm_manager.lock);
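
The second hunk follows from the kernel's *_locked naming convention: a
function with that suffix expects the caller to already hold the
relevant lock, so once radeon_vm_unbind_locked() relies on vm->mutex
being held, radeon_vm_manager_fini() has to take it around the call. A
generic sketch of that convention, again with illustrative names rather
than the radeon code:

/* Illustrative only: the *_locked caller-holds-the-lock convention. */
#include <pthread.h>

struct vm {
	pthread_mutex_t mutex;
	int bound;
};

/* The _locked suffix means the caller already holds vm->mutex;
 * this function must not take it again. */
static void vm_unbind_locked(struct vm *vm)
{
	vm->bound = 0;
}

/* Wrapper for callers that do not hold the mutex yet. */
static void vm_unbind(struct vm *vm)
{
	pthread_mutex_lock(&vm->mutex);
	vm_unbind_locked(vm);
	pthread_mutex_unlock(&vm->mutex);
}

int main(void)
{
	struct vm vm = { PTHREAD_MUTEX_INITIALIZER, 1 };
	vm_unbind(&vm);		/* takes the lock itself */
	return 0;
}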