diff mbox series

[RFC,18/18] drm/sched: Scale deadlines depending on queue depth

Message ID 20250108183528.41007-19-tvrtko.ursulin@igalia.com (mailing list archive)
State New
Headers show
Series Deadline scheduler and other ideas | expand

Commit Message

Tvrtko Ursulin Jan. 8, 2025, 6:35 p.m. UTC
Speculative idea for addressing the concern of how to somewhat prioritise
clients who submit small amounts of work infrequently: scale their deadline
calculation based on their queue depth and priority. For the kernel context
we can pull the deadline more aggressively into the past.

On the other hand, queue depth may not be representative of GPU
utilisation, so this can also incorrectly penalise short but deep(-ish)
queues relative to single large jobs.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Danilo Krummrich <dakr@redhat.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Philipp Stanner <pstanner@redhat.com>
---
 drivers/gpu/drm/scheduler/sched_entity.c | 24 ++++++++++++++++++------
 include/drm/gpu_scheduler.h              |  2 ++
 2 files changed, 20 insertions(+), 6 deletions(-)
diff mbox series

Patch

diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 29daee6b06e5..d17980af85fc 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -391,14 +391,24 @@  static ktime_t
 __drm_sched_entity_get_job_deadline(struct drm_sched_entity *entity,
 				    ktime_t submit_ts)
 {
-	static const unsigned int d_us[] = {
-		[DRM_SCHED_PRIORITY_KERNEL] =    100,
-		[DRM_SCHED_PRIORITY_HIGH]   =   1000,
-		[DRM_SCHED_PRIORITY_NORMAL] =   5000,
-		[DRM_SCHED_PRIORITY_LOW]    = 100000,
+	static const long d_us[] = {
+		[DRM_SCHED_PRIORITY_KERNEL] = -1000,
+		[DRM_SCHED_PRIORITY_HIGH]   =   334,
+		[DRM_SCHED_PRIORITY_NORMAL] =  1000,
+		[DRM_SCHED_PRIORITY_LOW]    =  6667,
 	};
+	static const unsigned int shift[] = {
+		[DRM_SCHED_PRIORITY_KERNEL] = 4,
+		[DRM_SCHED_PRIORITY_HIGH]   = 1,
+		[DRM_SCHED_PRIORITY_NORMAL] = 2,
+		[DRM_SCHED_PRIORITY_LOW]    = 3,
+	};
+	const unsigned int prio = entity->priority;
+	long d;
+
+	d = d_us[prio] * (atomic_read(&entity->qd) << shift[prio]);
 
-	return ktime_add_us(submit_ts, d_us[entity->priority]);
+	return ktime_add_us(submit_ts, d);
 }
 
 ktime_t
@@ -520,6 +530,7 @@  struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
 	 */
 	smp_wmb();
 
+	atomic_dec(&entity->qd);
 	spsc_queue_pop(&entity->job_queue);
 	drm_sched_rq_pop_entity(entity->rq, entity);
 
@@ -608,6 +619,7 @@  void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
 	else
 		fence_deadline = KTIME_MAX;
 
+	atomic_inc(&entity->qd);
 	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
 
 	/* first job wakes up scheduler */
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 25786fb941d8..bce88c9b30c1 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -147,6 +147,8 @@  struct drm_sched_entity {
 	 */
 	struct spsc_queue		job_queue;
 
+	atomic_t			qd;
+
 	/**
 	 * @fence_seq:
 	 *