@@ -334,11 +334,20 @@ static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
  */
 static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
 {
+	lockdep_assert_held(&sched->job_list_lock);
+
 	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
 	    !list_empty(&sched->pending_list))
 		queue_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
 }
 
+static void drm_sched_start_timeout_unlocked(struct drm_gpu_scheduler *sched)
+{
+	spin_lock(&sched->job_list_lock);
+	drm_sched_start_timeout(sched);
+	spin_unlock(&sched->job_list_lock);
+}
+
 /**
  * drm_sched_fault - immediately start timeout handler
  *
@@ -451,11 +460,8 @@ static void drm_sched_job_timedout(struct work_struct *work)
 		spin_unlock(&sched->job_list_lock);
 	}
 
-	if (status != DRM_GPU_SCHED_STAT_ENODEV) {
-		spin_lock(&sched->job_list_lock);
-		drm_sched_start_timeout(sched);
-		spin_unlock(&sched->job_list_lock);
-	}
+	if (status != DRM_GPU_SCHED_STAT_ENODEV)
+		drm_sched_start_timeout_unlocked(sched);
 }
 
 /**
@@ -581,11 +587,8 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
 			drm_sched_job_done(s_job, -ECANCELED);
 	}
 
-	if (full_recovery) {
-		spin_lock(&sched->job_list_lock);
-		drm_sched_start_timeout(sched);
-		spin_unlock(&sched->job_list_lock);
-	}
+	if (full_recovery)
+		drm_sched_start_timeout_unlocked(sched);
 
 	drm_sched_wqueue_start(sched);
 }