
[4/6] drm/scheduler: Essentialize the job done callback

Message ID: 20201125031708.6433-5-luben.tuikov@amd.com
State: New, archived
Series: Allow to extend the timeout without jobs disappearing

Commit Message

Luben Tuikov Nov. 25, 2020, 3:17 a.m. UTC
The job done callback is called from various
places in two roles: as the job-done handler
and as a dma_fence callback.

Split the callback into a core function which
only completes the job, and a second function
with the dma_fence callback prototype which
calls the core function to complete the job.

Later patches use this from the completion
code.

Signed-off-by: Luben Tuikov <luben.tuikov@amd.com>
---
 drivers/gpu/drm/scheduler/sched_main.c | 73 ++++++++++++++------------
 1 file changed, 40 insertions(+), 33 deletions(-)
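
For context, the effect of the split is easiest to see at the call sites: dma_fence_add_callback() returns -ENOENT when the fence has already signaled, in which case the completion helper drm_sched_job_done() is called directly instead of going through the fence-callback wrapper drm_sched_job_done_cb(). A minimal sketch of that call-site pattern follows; example_arm_job() is a hypothetical illustration of the pattern, not part of the patch:

    static void example_arm_job(struct drm_sched_job *s_job,
                                struct dma_fence *fence)
    {
            int r;

            if (fence) {
                    /* Run drm_sched_job_done() when the fence signals. */
                    r = dma_fence_add_callback(fence, &s_job->cb,
                                               drm_sched_job_done_cb);
                    if (r == -ENOENT)
                            /* Fence already signaled: complete the job now. */
                            drm_sched_job_done(s_job);
                    else if (r)
                            DRM_ERROR("fence add callback failed (%d)\n", r);
            } else {
                    /* No hardware fence: the job is trivially done. */
                    drm_sched_job_done(s_job);
            }
    }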

Comments

Christian König Nov. 25, 2020, 9:51 a.m. UTC | #1
On 25.11.20 at 04:17, Luben Tuikov wrote:
> The job done callback is called from various
> places in two roles: as the job-done handler
> and as a dma_fence callback.
>
> Split the callback into a core function which
> only completes the job, and a second function
> with the dma_fence callback prototype which
> calls the core function to complete the job.
>
> Later patches use this from the completion
> code.
>
> Signed-off-by: Luben Tuikov <luben.tuikov@amd.com>

Reviewed-by: Christian König <christian.koenig@amd.com>


Patch

diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index b694df12aaba..3eb7618a627d 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -60,8 +60,6 @@ 
 #define to_drm_sched_job(sched_job)		\
 		container_of((sched_job), struct drm_sched_job, queue_node)
 
-static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
-
 /**
  * drm_sched_rq_init - initialize a given run queue struct
  *
@@ -162,6 +160,40 @@  drm_sched_rq_select_entity(struct drm_sched_rq *rq)
 	return NULL;
 }
 
+/**
+ * drm_sched_job_done - complete a job
+ * @s_job: pointer to the job which is done
+ *
+ * Finish the job's fence and wake up the worker thread.
+ */
+static void drm_sched_job_done(struct drm_sched_job *s_job)
+{
+	struct drm_sched_fence *s_fence = s_job->s_fence;
+	struct drm_gpu_scheduler *sched = s_fence->sched;
+
+	atomic_dec(&sched->hw_rq_count);
+	atomic_dec(&sched->score);
+
+	trace_drm_sched_process_job(s_fence);
+
+	dma_fence_get(&s_fence->finished);
+	drm_sched_fence_finished(s_fence);
+	dma_fence_put(&s_fence->finished);
+	wake_up_interruptible(&sched->wake_up_worker);
+}
+
+/**
+ * drm_sched_job_done_cb - the callback for a done job
+ * @f: fence
+ * @cb: fence callbacks
+ */
+static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
+
+	drm_sched_job_done(s_job);
+}
+
 /**
  * drm_sched_dependency_optimized
  *
@@ -473,14 +505,14 @@  void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
 
 		if (fence) {
 			r = dma_fence_add_callback(fence, &s_job->cb,
-						   drm_sched_process_job);
+						   drm_sched_job_done_cb);
 			if (r == -ENOENT)
-				drm_sched_process_job(fence, &s_job->cb);
+				drm_sched_job_done(s_job);
 			else if (r)
 				DRM_ERROR("fence add callback failed (%d)\n",
 					  r);
 		} else
-			drm_sched_process_job(NULL, &s_job->cb);
+			drm_sched_job_done(s_job);
 	}
 
 	if (full_recovery) {
@@ -635,31 +667,6 @@  drm_sched_select_entity(struct drm_gpu_scheduler *sched)
 	return entity;
 }
 
-/**
- * drm_sched_process_job - process a job
- *
- * @f: fence
- * @cb: fence callbacks
- *
- * Called after job has finished execution.
- */
-static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
-{
-	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
-	struct drm_sched_fence *s_fence = s_job->s_fence;
-	struct drm_gpu_scheduler *sched = s_fence->sched;
-
-	atomic_dec(&sched->hw_rq_count);
-	atomic_dec(&sched->score);
-
-	trace_drm_sched_process_job(s_fence);
-
-	dma_fence_get(&s_fence->finished);
-	drm_sched_fence_finished(s_fence);
-	dma_fence_put(&s_fence->finished);
-	wake_up_interruptible(&sched->wake_up_worker);
-}
-
 /**
  * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
  *
@@ -809,9 +816,9 @@  static int drm_sched_main(void *param)
 		if (!IS_ERR_OR_NULL(fence)) {
 			s_fence->parent = dma_fence_get(fence);
 			r = dma_fence_add_callback(fence, &sched_job->cb,
-						   drm_sched_process_job);
+						   drm_sched_job_done_cb);
 			if (r == -ENOENT)
-				drm_sched_process_job(fence, &sched_job->cb);
+				drm_sched_job_done(sched_job);
 			else if (r)
 				DRM_ERROR("fence add callback failed (%d)\n",
 					  r);
@@ -820,7 +827,7 @@  static int drm_sched_main(void *param)
 			if (IS_ERR(fence))
 				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
 
-			drm_sched_process_job(NULL, &sched_job->cb);
+			drm_sched_job_done(sched_job);
 		}
 
 		wake_up(&sched->job_scheduled);