--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -290,7 +290,7 @@ static int suspend_resume_compute_scheduler(struct amdgpu_device *adev, bool sus
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
- if (!(ring && ring->sched.thread))
+ if (!(ring && drm_sched_submit_ready(&ring->sched)))
continue;
/* stop secheduler and drain ring. */
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1659,9 +1659,9 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
- if (!ring || !ring->sched.thread)
+ if (!ring || !drm_sched_submit_ready(&ring->sched))
continue;
- kthread_park(ring->sched.thread);
+ drm_sched_submit_stop(&ring->sched);
}
seq_puts(m, "run ib test:\n");
@@ -1675,9 +1675,9 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
- if (!ring || !ring->sched.thread)
+ if (!ring || !drm_sched_submit_ready(&ring->sched))
continue;
- kthread_unpark(ring->sched.thread);
+ drm_sched_submit_start(&ring->sched);
}
up_write(&adev->reset_domain->sem);
@@ -1897,7 +1897,8 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
ring = adev->rings[val];
- if (!ring || !ring->funcs->preempt_ib || !ring->sched.thread)
+ if (!ring || !ring->funcs->preempt_ib ||
+ !drm_sched_submit_ready(&ring->sched))
return -EINVAL;
/* the last preemption failed */
@@ -1915,7 +1916,7 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
goto pro_end;
/* stop the scheduler */
- kthread_park(ring->sched.thread);
+ drm_sched_submit_stop(&ring->sched);
/* preempt the IB */
r = amdgpu_ring_preempt_ib(ring);
@@ -1949,7 +1950,7 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
failure:
/* restart the scheduler */
- kthread_unpark(ring->sched.thread);
+ drm_sched_submit_start(&ring->sched);
up_read(&adev->reset_domain->sem);
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4614,7 +4614,7 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
- if (!ring || !ring->sched.thread)
+ if (!ring || !drm_sched_submit_ready(&ring->sched))
continue;
spin_lock(&ring->sched.job_list_lock);
@@ -4753,7 +4753,7 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
- if (!ring || !ring->sched.thread)
+ if (!ring || !drm_sched_submit_ready(&ring->sched))
continue;
/* Clear job fence from fence drv to avoid force_completion
@@ -5292,7 +5292,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = tmp_adev->rings[i];
- if (!ring || !ring->sched.thread)
+ if (!ring || !drm_sched_submit_ready(&ring->sched))
continue;
drm_sched_stop(&ring->sched, job ? &job->base : NULL);
@@ -5367,7 +5367,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = tmp_adev->rings[i];
- if (!ring || !ring->sched.thread)
+ if (!ring || !drm_sched_submit_ready(&ring->sched))
continue;
drm_sched_start(&ring->sched, true);
@@ -5693,7 +5693,7 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
- if (!ring || !ring->sched.thread)
+ if (!ring || !drm_sched_submit_ready(&ring->sched))
continue;
drm_sched_stop(&ring->sched, NULL);
@@ -5821,7 +5821,7 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
- if (!ring || !ring->sched.thread)
+ if (!ring || !drm_sched_submit_ready(&ring->sched))
continue;
drm_sched_start(&ring->sched, true);
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -809,7 +809,8 @@ static void suspend_scheduler(struct msm_gpu *gpu)
*/
for (i = 0; i < gpu->nr_rings; i++) {
struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
- kthread_park(sched->thread);
+
+ drm_sched_submit_stop(sched);
}
}
@@ -819,7 +820,8 @@ static void resume_scheduler(struct msm_gpu *gpu)
for (i = 0; i < gpu->nr_rings; i++) {
struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
- kthread_unpark(sched->thread);
+
+ drm_sched_submit_start(sched);
}
}
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -439,7 +439,7 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
struct drm_sched_job *s_job, *tmp;
- kthread_park(sched->thread);
+ drm_sched_submit_stop(sched);
/*
* Reinsert back the bad job here - now it's safe as
@@ -552,7 +552,7 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
spin_unlock(&sched->job_list_lock);
}
- kthread_unpark(sched->thread);
+ drm_sched_submit_start(sched);
}
EXPORT_SYMBOL(drm_sched_start);
@@ -1206,3 +1206,38 @@ void drm_sched_increase_karma(struct drm_sched_job *bad)
}
}
EXPORT_SYMBOL(drm_sched_increase_karma);
+
+/**
+ * drm_sched_submit_ready - scheduler ready for submission
+ *
+ * @sched: scheduler instance
+ *
+ * Returns true if submission is ready
+ */
+bool drm_sched_submit_ready(struct drm_gpu_scheduler *sched)
+{
+ return !!sched->thread;
+}
+EXPORT_SYMBOL(drm_sched_submit_ready);
+
+/**
+ * drm_sched_submit_stop - stop scheduler submission
+ *
+ * @sched: scheduler instance
+ */
+void drm_sched_submit_stop(struct drm_gpu_scheduler *sched)
+{
+ kthread_park(sched->thread);
+}
+EXPORT_SYMBOL(drm_sched_submit_stop);
+
+/**
+ * drm_sched_submit_start - start scheduler submission
+ *
+ * @sched: scheduler instance
+ */
+void drm_sched_submit_start(struct drm_gpu_scheduler *sched)
+{
+ kthread_unpark(sched->thread);
+}
+EXPORT_SYMBOL(drm_sched_submit_start);
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -550,6 +550,9 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched);
+bool drm_sched_submit_ready(struct drm_gpu_scheduler *sched);
+void drm_sched_submit_stop(struct drm_gpu_scheduler *sched);
+void drm_sched_submit_start(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
Add scheduler submit ready, stop, and start helpers to hide the
implementation details of the scheduler from the drivers.

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
 .../drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 15 +++----
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  | 12 +++---
 drivers/gpu/drm/msm/adreno/adreno_device.c  |  6 ++-
 drivers/gpu/drm/scheduler/sched_main.c      | 40 ++++++++++++++++++-
 include/drm/gpu_scheduler.h                 |  3 ++
 6 files changed, 60 insertions(+), 18 deletions(-)
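
A minimal usage sketch follows for readers new to the helpers. It is illustrative only and not part of this patch: struct my_device, struct my_ring, and my_do_reset() are hypothetical placeholders for a driver's own ring/device objects and reset path; only the three drm_sched_submit_*() calls come from the patch above.

#include <drm/gpu_scheduler.h>

/* Hypothetical driver objects, stand-ins for real ring/device structures. */
struct my_ring {
	struct drm_gpu_scheduler sched;
};

struct my_device {
	struct my_ring *rings[8];
	unsigned int num_rings;
};

/* Placeholder for the driver's actual hardware reset sequence. */
static int my_do_reset(struct my_device *mdev)
{
	return 0;
}

static int my_device_reset(struct my_device *mdev)
{
	unsigned int i;
	int r;

	/* Park submission on every initialized ring before touching the HW. */
	for (i = 0; i < mdev->num_rings; i++) {
		struct drm_gpu_scheduler *sched = &mdev->rings[i]->sched;

		if (!drm_sched_submit_ready(sched))
			continue;

		drm_sched_submit_stop(sched);
	}

	r = my_do_reset(mdev);

	/* Restart submission whether or not the reset succeeded. */
	for (i = 0; i < mdev->num_rings; i++) {
		struct drm_gpu_scheduler *sched = &mdev->rings[i]->sched;

		if (!drm_sched_submit_ready(sched))
			continue;

		drm_sched_submit_start(sched);
	}

	return r;
}

Note that drm_sched_submit_ready() only reports whether the scheduler was ever initialized (it checks sched->thread), so it keeps returning true while submission is stopped; stop/start can therefore be called unconditionally after the ready check, mirroring the pattern used in the amdgpu conversions above.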