[v2] gpu/drm/bridge/cadence: avoid flush_scheduled_work() usage

Message ID: e9b95132-89cd-5cfc-1a09-966393c5ecb0@I-love.SAKURA.ne.jp
State: New, archived

Commit Message

Tetsuo Handa June 10, 2022, 2:35 p.m. UTC
Use local wq in order to avoid flush_scheduled_work() usage.

Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
---
Changes in v2:
  Replace flush_scheduled_work() with flush_workqueue().

Please see commit c4f135d643823a86 ("workqueue: Wrap flush_workqueue()
using a macro") for background.

This is a blind conversion, and is only compile tested.

 .../drm/bridge/cadence/cdns-mhdp8546-core.c   | 32 ++++++++++++++++---
 .../drm/bridge/cadence/cdns-mhdp8546-core.h   |  2 ++
 .../drm/bridge/cadence/cdns-mhdp8546-hdcp.c   | 16 +++++-----
 3 files changed, 37 insertions(+), 13 deletions(-)

Comments

Tetsuo Handa June 13, 2022, 1:57 p.m. UTC | #1
On 2022/06/10 23:35, Tetsuo Handa wrote:
> Use local wq in order to avoid flush_scheduled_work() usage.
> 
> Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
> ---
> Changes in v2:
>   Replace flush_scheduled_work() with flush_workqueue().
> 
> Please see commit c4f135d643823a86 ("workqueue: Wrap flush_workqueue()
> using a macro") for background.
> 
> This is a blind conversion, and is only compile tested.
> 
>  .../drm/bridge/cadence/cdns-mhdp8546-core.c   | 32 ++++++++++++++++---
>  .../drm/bridge/cadence/cdns-mhdp8546-core.h   |  2 ++
>  .../drm/bridge/cadence/cdns-mhdp8546-hdcp.c   | 16 +++++-----
>  3 files changed, 37 insertions(+), 13 deletions(-)
> 

I'm thinking about a flush_work() version, and I got confused.

Since the cdns-mhdp8546 driver uses 4 works

	mhdp->modeset_retry_work
	mhdp->hpd_work
	mhdp->hdcp.check_work
	mhdp->hdcp.prop_work

I assume that flush_scheduled_work() in cdns_mhdp_remove() needs to wait
for only these 4 works. And since mhdp->modeset_retry_work already uses
cancel_work_sync(), flush_scheduled_work() would need to wait for only 3 works.
Therefore, I guess that the flush_work() version would look something like

diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index 67f0f444b4e8..04b21752ab3f 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -2603,7 +2603,11 @@ static int cdns_mhdp_remove(struct platform_device *pdev)
 	pm_runtime_disable(&pdev->dev);
 
 	cancel_work_sync(&mhdp->modeset_retry_work);
-	flush_scheduled_work();
+	flush_work(&mhdp->hpd_work);
+	if (mhdp->hdcp_supported) {
+		cancel_delayed_work_sync(&mhdp->hdcp.check_work);
+		flush_work(&mhdp->hdcp.prop_work);
+	}
 
 	clk_disable_unprepare(mhdp->clk);
 

but I came to wonder whether mhdp->hdcp.check_work should be flushed or cancelled.

While flush_scheduled_work() waits for completion of works which were already
queued to system_wq, mhdp->hdcp.check_work is a delayed work (unlike
mhdp->modeset_retry_work, which is a plain work and is already covered by
cancel_work_sync()). That is, the work won't be queued to system_wq until its
timer expires.
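
For illustration, a minimal sketch of that behaviour (a hypothetical demo
module; none of these names are taken from this driver):

	#include <linux/module.h>
	#include <linux/workqueue.h>

	static void demo_fn(struct work_struct *work)
	{
		pr_info("demo work ran\n");
	}

	static DECLARE_DELAYED_WORK(demo_dwork, demo_fn);

	static int __init demo_init(void)
	{
		/* Arms a timer; nothing is queued to system_wq yet. */
		schedule_delayed_work(&demo_dwork, 10 * HZ);

		/*
		 * Waits only for works already queued to system_wq.
		 * demo_dwork is still sitting on its timer, so this
		 * returns without waiting for demo_fn().
		 */
		flush_scheduled_work();

		/*
		 * flush_delayed_work(&demo_dwork) would instead kick
		 * the timer and wait for demo_fn() to complete.
		 */
		return 0;
	}
	module_init(demo_init);

	static void __exit demo_exit(void)
	{
		cancel_delayed_work_sync(&demo_dwork);
	}
	module_exit(demo_exit);

	MODULE_LICENSE("GPL");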

Current code will wait for mhdp->hdcp.check_work only if its timer already
expired. If the timer has not expired yet, flush_scheduled_work() will fail to
flush mhdp->hdcp.check_work, and cdns_mhdp_hdcp_check_work(), which runs once
that timer fires, may queue mhdp->hdcp.prop_work, which is too late for
flush_scheduled_work() to wait for completion of cdns_mhdp_hdcp_prop_work().

Thus, how do we want to handle this race window?

  flush_delayed_work(&mhdp->hdcp.check_work) followed by
  flush_work(&mhdp->hdcp.prop_work) (i.e. flush as much as possible) ?

  cancel_delayed_work_sync(&mhdp->hdcp.check_work) followed by
  cancel_work_sync(&mhdp->hdcp.prop_work) (i.e. cancel as much as possible) ?

  do nothing (i.e. no need to flush or cancel mhdp->hdcp.check_work and mhdp->hdcp.prop_work) ?
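
For reference, the first option above ("flush as much as possible") would make
the remove path look roughly like the sketch below (illustration only; note
that cdns_mhdp_hdcp_check_work() re-arms itself when the link check succeeds,
so even flush_delayed_work() does not guarantee the work is quiescent):

	cancel_work_sync(&mhdp->modeset_retry_work);
	flush_work(&mhdp->hpd_work);
	if (mhdp->hdcp_supported) {
		/* Kick the timer if pending and wait for the handler. */
		flush_delayed_work(&mhdp->hdcp.check_work);
		/* Wait for any prop_work queued by check_work. */
		flush_work(&mhdp->hdcp.prop_work);
	}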
Tetsuo Handa June 30, 2022, 4:33 a.m. UTC | #2
Ping?

On 2022/06/13 22:57, Tetsuo Handa wrote:
> Thus, how do we want to handle this race window?
> 
>   flush_delayed_work(&mhdp->hdcp.check_work) followed by
>   flush_work(&mhdp->hdcp.prop_work) (i.e. flush as much as possible) ?
> 
>   cancel_delayed_work_sync(&mhdp->hdcp.check_work) followed by
>   cancel_work_sync(&mhdp->hdcp.prop_work) (i.e. cancel as much as possible) ?
> 
>   do nothing (i.e. no need to flush or cancel mhdp->hdcp.check_work and mhdp->hdcp.prop_work) ?
>

Patch

diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index 67f0f444b4e8..f29c9484d4bb 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -53,6 +53,8 @@ 
 #include "cdns-mhdp8546-hdcp.h"
 #include "cdns-mhdp8546-j721e.h"
 
+struct workqueue_struct *cadence_mhdp_wq;
+
 static int cdns_mhdp_mailbox_read(struct cdns_mhdp_device *mhdp)
 {
 	int ret, empty;
@@ -2049,7 +2051,7 @@  static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
 out:
 	mutex_unlock(&mhdp->link_mutex);
 	if (ret < 0)
-		schedule_work(&mhdp->modeset_retry_work);
+		queue_work(cadence_mhdp_wq, &mhdp->modeset_retry_work);
 }
 
 static void cdns_mhdp_atomic_disable(struct drm_bridge *bridge,
@@ -2373,7 +2375,7 @@  static irqreturn_t cdns_mhdp_irq_handler(int irq, void *data)
 	spin_unlock(&mhdp->start_lock);
 
 	if (bridge_attached && (sw_ev0 & CDNS_DPTX_HPD)) {
-		schedule_work(&mhdp->hpd_work);
+		queue_work(cadence_mhdp_wq, &mhdp->hpd_work);
 	}
 
 	if (sw_ev0 & ~CDNS_DPTX_HPD) {
@@ -2413,7 +2415,7 @@  static void cdns_mhdp_hpd_work(struct work_struct *work)
 	ret = cdns_mhdp_update_link_status(mhdp);
 	if (mhdp->connector.dev) {
 		if (ret < 0)
-			schedule_work(&mhdp->modeset_retry_work);
+			queue_work(cadence_mhdp_wq, &mhdp->modeset_retry_work);
 		else
 			drm_kms_helper_hotplug_event(mhdp->bridge.dev);
 	} else {
@@ -2603,7 +2605,7 @@  static int cdns_mhdp_remove(struct platform_device *pdev)
 	pm_runtime_disable(&pdev->dev);
 
 	cancel_work_sync(&mhdp->modeset_retry_work);
-	flush_scheduled_work();
+	flush_workqueue(cadence_mhdp_wq);
 
 	clk_disable_unprepare(mhdp->clk);
 
@@ -2632,7 +2634,27 @@  static struct platform_driver mhdp_driver = {
 	.probe	= cdns_mhdp_probe,
 	.remove	= cdns_mhdp_remove,
 };
-module_platform_driver(mhdp_driver);
+
+static int __init mhdp_init(void)
+{
+	int ret;
+
+	cadence_mhdp_wq = alloc_workqueue("cadence_mhdp_wq", 0, 0);
+	if (!cadence_mhdp_wq)
+		return -ENOMEM;
+	ret = platform_driver_register(&mhdp_driver);
+	if (ret)
+		destroy_workqueue(cadence_mhdp_wq);
+	return ret;
+}
+module_init(mhdp_init);
+
+static void __exit mhdp_exit(void)
+{
+	platform_driver_unregister(&mhdp_driver);
+	destroy_workqueue(cadence_mhdp_wq);
+}
+module_exit(mhdp_exit);
 
 MODULE_FIRMWARE(FW_NAME);
 
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h
index bedddd510d17..e6c475612480 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h
@@ -419,4 +419,6 @@  struct cdns_mhdp_device {
 
 u32 cdns_mhdp_wait_for_sw_event(struct cdns_mhdp_device *mhdp, uint32_t event);
 
+extern struct workqueue_struct *cadence_mhdp_wq;
+
 #endif
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
index 946212a95598..09f9e3d42f11 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
@@ -449,14 +449,14 @@  static int cdns_mhdp_hdcp_check_link(struct cdns_mhdp_device *mhdp)
 	ret = _cdns_mhdp_hdcp_disable(mhdp);
 	if (ret) {
 		mhdp->hdcp.value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
-		schedule_work(&mhdp->hdcp.prop_work);
+		queue_work(cadence_mhdp_wq, &mhdp->hdcp.prop_work);
 		goto out;
 	}
 
 	ret = _cdns_mhdp_hdcp_enable(mhdp, mhdp->hdcp.hdcp_content_type);
 	if (ret) {
 		mhdp->hdcp.value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
-		schedule_work(&mhdp->hdcp.prop_work);
+		queue_work(cadence_mhdp_wq, &mhdp->hdcp.prop_work);
 	}
 out:
 	mutex_unlock(&mhdp->hdcp.mutex);
@@ -474,8 +474,8 @@  static void cdns_mhdp_hdcp_check_work(struct work_struct *work)
 						     hdcp);
 
 	if (!cdns_mhdp_hdcp_check_link(mhdp))
-		schedule_delayed_work(&hdcp->check_work,
-				      DRM_HDCP_CHECK_PERIOD_MS);
+		queue_delayed_work(cadence_mhdp_wq, &hdcp->check_work,
+				   DRM_HDCP_CHECK_PERIOD_MS);
 }
 
 static void cdns_mhdp_hdcp_prop_work(struct work_struct *work)
@@ -538,9 +538,9 @@  int cdns_mhdp_hdcp_enable(struct cdns_mhdp_device *mhdp, u8 content_type)
 
 	mhdp->hdcp.hdcp_content_type = content_type;
 	mhdp->hdcp.value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
-	schedule_work(&mhdp->hdcp.prop_work);
-	schedule_delayed_work(&mhdp->hdcp.check_work,
-			      DRM_HDCP_CHECK_PERIOD_MS);
+	queue_work(cadence_mhdp_wq, &mhdp->hdcp.prop_work);
+	queue_delayed_work(cadence_mhdp_wq, &mhdp->hdcp.check_work,
+			   DRM_HDCP_CHECK_PERIOD_MS);
 out:
 	mutex_unlock(&mhdp->hdcp.mutex);
 	return ret;
@@ -553,7 +553,7 @@  int cdns_mhdp_hdcp_disable(struct cdns_mhdp_device *mhdp)
 	mutex_lock(&mhdp->hdcp.mutex);
 	if (mhdp->hdcp.value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
 		mhdp->hdcp.value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
-		schedule_work(&mhdp->hdcp.prop_work);
+		queue_work(cadence_mhdp_wq, &mhdp->hdcp.prop_work);
 		ret = _cdns_mhdp_hdcp_disable(mhdp);
 	}
 	mutex_unlock(&mhdp->hdcp.mutex);