
[1/2] drm/etnaviv: split fence lock

Message ID 20221201174846.2732578-1-l.stach@pengutronix.de (mailing list archive)
State New, archived
Series [1/2] drm/etnaviv: split fence lock

Commit Message

Lucas Stach Dec. 1, 2022, 5:48 p.m. UTC
The fence lock currently protects two distinct things. It protects the fence
IDR from concurrent inserts and removes, and it also keeps drm_sched_job_arm
and drm_sched_entity_push_job in one atomic section to guarantee fence seqno
monotonicity. Split the lock into two separate locks, one for each of those
roles.
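
As a rough sketch only, the two critical sections after the split look like
this, condensed from the hunks below (error handling and unrelated code
omitted):

	/*
	 * sched_lock keeps drm_sched_job_arm() and drm_sched_entity_push_job()
	 * in one atomic section, so the scheduler fence seqnos of pushed jobs
	 * stay monotonic.
	 */
	mutex_lock(&gpu->sched_lock);

	drm_sched_job_arm(&submit->sched_job);
	submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);

	/*
	 * idr_lock only guards the fence IDR against concurrent inserts and
	 * removes and is held just around the IDR operation itself.
	 */
	mutex_lock(&gpu->idr_lock);
	submit->out_fence_id = idr_alloc_cyclic(&gpu->fence_idr, submit->out_fence,
						0, INT_MAX, GFP_KERNEL);
	mutex_unlock(&gpu->idr_lock);

	drm_sched_entity_push_job(&submit->sched_job);

	mutex_unlock(&gpu->sched_lock);

	/* On the cleanup side, idr_lock alone is sufficient: */
	mutex_lock(&gpu->idr_lock);
	idr_remove(&gpu->fence_idr, submit->out_fence_id);
	mutex_unlock(&gpu->idr_lock);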

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
---
 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c |  4 ++--
 drivers/gpu/drm/etnaviv/etnaviv_gpu.c        |  3 ++-
 drivers/gpu/drm/etnaviv/etnaviv_gpu.h        |  3 ++-
 drivers/gpu/drm/etnaviv/etnaviv_sched.c      | 11 +++++++----
 4 files changed, 13 insertions(+), 8 deletions(-)

Comments

Philipp Zabel Dec. 2, 2022, 9:41 a.m. UTC | #1
On Thu, Dec 01, 2022 at 06:48:45PM +0100, Lucas Stach wrote:
> The fence lock currently protects two distinct things. It protects the fence
> IDR from concurrent inserts and removes, and it also keeps drm_sched_job_arm
> and drm_sched_entity_push_job in one atomic section to guarantee fence seqno
> monotonicity. Split the lock into two separate locks, one for each of those
> roles.
> 
> Signed-off-by: Lucas Stach <l.stach@pengutronix.de>

Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de>

regards
Philipp

Patch

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 1ac916b24891..2337b24b05b0 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -394,9 +394,9 @@ static void submit_cleanup(struct kref *kref)
 
 	if (submit->out_fence) {
 		/* first remove from IDR, so fence can not be found anymore */
-		mutex_lock(&submit->gpu->fence_lock);
+		mutex_lock(&submit->gpu->idr_lock);
 		idr_remove(&submit->gpu->fence_idr, submit->out_fence_id);
-		mutex_unlock(&submit->gpu->fence_lock);
+		mutex_unlock(&submit->gpu->idr_lock);
 		dma_fence_put(submit->out_fence);
 	}
 	kfree(submit->pmrs);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 37018bc55810..30d7c1d8d6c0 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1786,7 +1786,8 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
 
 	gpu->dev = &pdev->dev;
 	mutex_init(&gpu->lock);
-	mutex_init(&gpu->fence_lock);
+	mutex_init(&gpu->sched_lock);
+	mutex_init(&gpu->idr_lock);
 
 	/* Map registers: */
 	gpu->mmio = devm_platform_ioremap_resource(pdev, 0);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 85eddd492774..267d8ec97f11 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -99,6 +99,7 @@ struct etnaviv_gpu {
 	struct etnaviv_chip_identity identity;
 	enum etnaviv_sec_mode sec_mode;
 	struct workqueue_struct *wq;
+	struct mutex sched_lock;
 	struct drm_gpu_scheduler sched;
 	bool initialized;
 	bool fe_running;
@@ -116,7 +117,7 @@ struct etnaviv_gpu {
 	u32 idle_mask;
 
 	/* Fencing support */
-	struct mutex fence_lock;
+	struct mutex idr_lock;
 	struct idr fence_idr;
 	u32 next_fence;
 	u32 completed_fence;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 72e2553fbc98..27448431a45c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -97,21 +97,24 @@ static const struct drm_sched_backend_ops etnaviv_sched_ops = {
 
 int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit)
 {
+	struct etnaviv_gpu *gpu = submit->gpu;
 	int ret = 0;
 
 	/*
-	 * Hold the fence lock across the whole operation to avoid jobs being
+	 * Hold the sched lock across the whole operation to avoid jobs being
 	 * pushed out of order with regard to their sched fence seqnos as
 	 * allocated in drm_sched_job_arm.
 	 */
-	mutex_lock(&submit->gpu->fence_lock);
+	mutex_lock(&gpu->sched_lock);
 
 	drm_sched_job_arm(&submit->sched_job);
 
 	submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
-	submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
+	mutex_lock(&gpu->idr_lock);
+	submit->out_fence_id = idr_alloc_cyclic(&gpu->fence_idr,
 						submit->out_fence, 0,
 						INT_MAX, GFP_KERNEL);
+	mutex_unlock(&gpu->idr_lock);
 	if (submit->out_fence_id < 0) {
 		drm_sched_job_cleanup(&submit->sched_job);
 		ret = -ENOMEM;
@@ -124,7 +127,7 @@ int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit)
 	drm_sched_entity_push_job(&submit->sched_job);
 
 out_unlock:
-	mutex_unlock(&submit->gpu->fence_lock);
+	mutex_unlock(&gpu->sched_lock);
 
 	return ret;
 }