
[v3,1/2] scsi: ufs: core: Introduce a new clock_gating lock

Message ID 20241105112502.710427-2-avri.altman@wdc.com (mailing list archive)
State Superseded
Series Untie the host lock entanglement - part 2

Commit Message

Avri Altman Nov. 5, 2024, 11:25 a.m. UTC
Introduce a new clock gating lock to serialize access to some of the
clock gating members instead of the host_lock.

While at it, simplify the code with the guard() macro and friends for
automatic cleanup of the new lock. A few explicit
spin_lock_irqsave()/spin_unlock_irqrestore() instances remain where the
lock/unlock flow snakes across scopes and was too convoluted to convert.

Additionally, move the trace_ufshcd_clk_gating() call out of the region
protected by the lock, as it doesn't need protection.

Signed-off-by: Avri Altman <avri.altman@wdc.com>
---
 drivers/ufs/core/ufshcd.c | 93 +++++++++++++++++----------------------
 include/ufs/ufshcd.h      |  8 +++-
 2 files changed, 46 insertions(+), 55 deletions(-)
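
For readers less familiar with the cleanup helpers mentioned in the commit
message, here is a minimal, self-contained sketch (not taken from this patch;
"struct demo" and the demo_set_*() functions are made-up names for
illustration) of how guard()/scoped_guard() from <linux/cleanup.h> replace
explicit spin_lock_irqsave()/spin_unlock_irqrestore() pairs, and how a
trace/log call can be kept outside the critical section:

#include <linux/cleanup.h>
#include <linux/printk.h>
#include <linux/spinlock.h>

struct demo {
        spinlock_t lock;
        int state;
};

/* Explicit form: every exit path must drop the lock by hand. */
static void demo_set_explicit(struct demo *d, int state)
{
        unsigned long flags;

        spin_lock_irqsave(&d->lock, flags);
        d->state = state;
        spin_unlock_irqrestore(&d->lock, flags);
}

/*
 * guard() form: the lock is released automatically when the function
 * returns, including on any early return.
 */
static void demo_set_guarded(struct demo *d, int state)
{
        guard(spinlock_irqsave)(&d->lock);
        d->state = state;
}

/*
 * scoped_guard() form: the critical section is limited to the braces,
 * so slower work such as tracing runs after the lock is dropped.
 */
static void demo_set_scoped(struct demo *d, int state)
{
        scoped_guard(spinlock_irqsave, &d->lock)
                d->state = state;

        pr_debug("state is now %d\n", d->state);        /* outside the lock */
}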

Comments

Avri Altman Nov. 9, 2024, 6:05 a.m. UTC | #1
A gentle ping.

Thanks,
Avri

> -----Original Message-----
> From: Avri Altman <avri.altman@wdc.com>
> Sent: Tuesday, November 5, 2024 1:25 PM
> To: Martin K . Petersen <martin.petersen@oracle.com>
> Cc: linux-scsi@vger.kernel.org; linux-kernel@vger.kernel.org; Bart Van Assche
> <bvanassche@acm.org>; Avri Altman <Avri.Altman@wdc.com>
> Subject: [PATCH v3 1/2] scsi: ufs: core: Introduce a new clock_gating lock
> 
Bean Huo Nov. 11, 2024, 11:16 p.m. UTC | #2
On Tue, 2024-11-05 at 13:25 +0200, Avri Altman wrote:
> -       spin_lock_irqsave(hba->host->host_lock, flags);
> -       /*
> -        * In case you are here to cancel this work the gating state
> -        * would be marked as REQ_CLKS_ON. In this case save time by
> -        * skipping the gating work and exit after changing the clock
> -        * state to CLKS_ON.
> -        */
> -       if (hba->clk_gating.is_suspended ||
> -               (hba->clk_gating.state != REQ_CLKS_OFF)) {
> -               hba->clk_gating.state = CLKS_ON;
> -               trace_ufshcd_clk_gating(dev_name(hba->dev),
> -                                       hba->clk_gating.state);
> -               goto rel_lock;
> +       scoped_guard(spinlock_irqsave, &hba->clk_gating.lock)
> +       {
> +               /*
> +                * In case you are here to cancel this work the gating state
> +                * would be marked as REQ_CLKS_ON. In this case save time by
> +                * skipping the gating work and exit after changing the clock
> +                * state to CLKS_ON.
> +                */
> +               if (hba->clk_gating.is_suspended ||
> +                   hba->clk_gating.state != REQ_CLKS_OFF) {
> +                       hba->clk_gating.state = CLKS_ON;
> +                       trace_ufshcd_clk_gating(dev_name(hba->dev),
> +                                               hba->clk_gating.state);
> +                       return;
> +               }
> +               if (ufshcd_is_ufs_dev_busy(hba) ||
> +                   hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
> +                       return;
>         }

I'm wondering if it would be safe to replace host_lock with gating.lock
or scaling.lock. For instance, in the above context, ufshcd_state needs
to be checked, but it is currently serialized by host_lock.

Kind regards,
Bean
Avri Altman Nov. 18, 2024, 7 a.m. UTC | #3
> On Tue, 2024-11-05 at 13:25 +0200, Avri Altman wrote:
> > -       spin_lock_irqsave(hba->host->host_lock, flags);
> > -       /*
> > -        * In case you are here to cancel this work the gating state
> > -        * would be marked as REQ_CLKS_ON. In this case save time by
> > -        * skipping the gating work and exit after changing the clock
> > -        * state to CLKS_ON.
> > -        */
> > -       if (hba->clk_gating.is_suspended ||
> > -               (hba->clk_gating.state != REQ_CLKS_OFF)) {
> > -               hba->clk_gating.state = CLKS_ON;
> > -               trace_ufshcd_clk_gating(dev_name(hba->dev),
> > -                                       hba->clk_gating.state);
> > -               goto rel_lock;
> > +       scoped_guard(spinlock_irqsave, &hba->clk_gating.lock)
> > +       {
> > +               /*
> > +                * In case you are here to cancel this work the gating state
> > +                * would be marked as REQ_CLKS_ON. In this case save time by
> > +                * skipping the gating work and exit after changing the clock
> > +                * state to CLKS_ON.
> > +                */
> > +               if (hba->clk_gating.is_suspended ||
> > +                   hba->clk_gating.state != REQ_CLKS_OFF) {
> > +                       hba->clk_gating.state = CLKS_ON;
> > +                       trace_ufshcd_clk_gating(dev_name(hba->dev),
> > +                                               hba->clk_gating.state);
> > +                       return;
> > +               }
> > +               if (ufshcd_is_ufs_dev_busy(hba) ||
> > +                   hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
> > +                       return;
> >         }
> 
> I'm wondering if it would be safe to replace host_lock with gating.lock or
> scaling.lock. For instance, in the above context, ufshcd_state needs to be
> checked, but it is currently serialized by host_lock.
Hi, thank you for your feedback.
Yeah - I think you have a valid point.
I will move the ufshcd_state check out of the scope of clk_gating.lock
and restore it under the host lock.

Thanks,
Avri


> 
> Kind regards,
> Bean
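
Based on this reply, the follow-up would presumably split the checks in
ufshcd_gate_work() roughly as in the hypothetical sketch below (an outline of
the suggested direction only, not the actual v4 hunk): the clock-gating
members stay under the new clk_gating.lock, while the ufshcd_state check goes
back under host_lock.

        scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) {
                if (hba->clk_gating.is_suspended ||
                    hba->clk_gating.state != REQ_CLKS_OFF) {
                        hba->clk_gating.state = CLKS_ON;
                        trace_ufshcd_clk_gating(dev_name(hba->dev),
                                                hba->clk_gating.state);
                        return;
                }
        }

        /* ufshcd_state is still serialized by host_lock. */
        scoped_guard(spinlock_irqsave, hba->host->host_lock) {
                if (ufshcd_is_ufs_dev_busy(hba) ||
                    hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
                        return;
        }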

Patch

diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index e338867bc96c..62c0e2323f50 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -1811,19 +1811,17 @@  static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
 static void ufshcd_ungate_work(struct work_struct *work)
 {
 	int ret;
-	unsigned long flags;
 	struct ufs_hba *hba = container_of(work, struct ufs_hba,
 			clk_gating.ungate_work);
 
 	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	if (hba->clk_gating.state == CLKS_ON) {
-		spin_unlock_irqrestore(hba->host->host_lock, flags);
-		return;
+	scoped_guard(spinlock_irqsave, &hba->clk_gating.lock)
+	{
+		if (hba->clk_gating.state == CLKS_ON)
+			return;
 	}
 
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
 	ufshcd_hba_vreg_set_hpm(hba);
 	ufshcd_setup_clocks(hba, true);
 
@@ -1858,7 +1856,7 @@  void ufshcd_hold(struct ufs_hba *hba)
 	if (!ufshcd_is_clkgating_allowed(hba) ||
 	    !hba->clk_gating.is_initialized)
 		return;
-	spin_lock_irqsave(hba->host->host_lock, flags);
+	spin_lock_irqsave(&hba->clk_gating.lock, flags);
 	hba->clk_gating.active_reqs++;
 
 start:
@@ -1874,11 +1872,11 @@  void ufshcd_hold(struct ufs_hba *hba)
 		 */
 		if (ufshcd_can_hibern8_during_gating(hba) &&
 		    ufshcd_is_link_hibern8(hba)) {
-			spin_unlock_irqrestore(hba->host->host_lock, flags);
+			spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
 			flush_result = flush_work(&hba->clk_gating.ungate_work);
 			if (hba->clk_gating.is_suspended && !flush_result)
 				return;
-			spin_lock_irqsave(hba->host->host_lock, flags);
+			spin_lock_irqsave(&hba->clk_gating.lock, flags);
 			goto start;
 		}
 		break;
@@ -1907,17 +1905,17 @@  void ufshcd_hold(struct ufs_hba *hba)
 		 */
 		fallthrough;
 	case REQ_CLKS_ON:
-		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
 		flush_work(&hba->clk_gating.ungate_work);
 		/* Make sure state is CLKS_ON before returning */
-		spin_lock_irqsave(hba->host->host_lock, flags);
+		spin_lock_irqsave(&hba->clk_gating.lock, flags);
 		goto start;
 	default:
 		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
 				__func__, hba->clk_gating.state);
 		break;
 	}
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
 }
 EXPORT_SYMBOL_GPL(ufshcd_hold);
 
@@ -1925,29 +1923,28 @@  static void ufshcd_gate_work(struct work_struct *work)
 {
 	struct ufs_hba *hba = container_of(work, struct ufs_hba,
 			clk_gating.gate_work.work);
-	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	/*
-	 * In case you are here to cancel this work the gating state
-	 * would be marked as REQ_CLKS_ON. In this case save time by
-	 * skipping the gating work and exit after changing the clock
-	 * state to CLKS_ON.
-	 */
-	if (hba->clk_gating.is_suspended ||
-		(hba->clk_gating.state != REQ_CLKS_OFF)) {
-		hba->clk_gating.state = CLKS_ON;
-		trace_ufshcd_clk_gating(dev_name(hba->dev),
-					hba->clk_gating.state);
-		goto rel_lock;
+	scoped_guard(spinlock_irqsave, &hba->clk_gating.lock)
+	{
+		/*
+		 * In case you are here to cancel this work the gating state
+		 * would be marked as REQ_CLKS_ON. In this case save time by
+		 * skipping the gating work and exit after changing the clock
+		 * state to CLKS_ON.
+		 */
+		if (hba->clk_gating.is_suspended ||
+		    hba->clk_gating.state != REQ_CLKS_OFF) {
+			hba->clk_gating.state = CLKS_ON;
+			trace_ufshcd_clk_gating(dev_name(hba->dev),
+						hba->clk_gating.state);
+			return;
+		}
+		if (ufshcd_is_ufs_dev_busy(hba) ||
+		    hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
+			return;
 	}
 
-	if (ufshcd_is_ufs_dev_busy(hba) || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
-		goto rel_lock;
-
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
-
 	/* put the link into hibern8 mode before turning off clocks */
 	if (ufshcd_can_hibern8_during_gating(hba)) {
 		ret = ufshcd_uic_hibern8_enter(hba);
@@ -1957,7 +1954,7 @@  static void ufshcd_gate_work(struct work_struct *work)
 					__func__, ret);
 			trace_ufshcd_clk_gating(dev_name(hba->dev),
 						hba->clk_gating.state);
-			goto out;
+			return;
 		}
 		ufshcd_set_link_hibern8(hba);
 	}
@@ -1977,16 +1974,12 @@  static void ufshcd_gate_work(struct work_struct *work)
 	 * prevent from doing cancel work multiple times when there are
 	 * new requests arriving before the current cancel work is done.
 	 */
-	spin_lock_irqsave(hba->host->host_lock, flags);
+	guard(spinlock_irqsave)(&hba->clk_gating.lock);
 	if (hba->clk_gating.state == REQ_CLKS_OFF) {
 		hba->clk_gating.state = CLKS_OFF;
 		trace_ufshcd_clk_gating(dev_name(hba->dev),
 					hba->clk_gating.state);
 	}
-rel_lock:
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
-out:
-	return;
 }
 
 /* host lock must be held before calling this variant */
@@ -2013,11 +2006,8 @@  static void __ufshcd_release(struct ufs_hba *hba)
 
 void ufshcd_release(struct ufs_hba *hba)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(hba->host->host_lock, flags);
+	guard(spinlock_irqsave)(&hba->clk_gating.lock);
 	__ufshcd_release(hba);
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ufshcd_release);
 
@@ -2032,11 +2022,9 @@  static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
 void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value)
 {
 	struct ufs_hba *hba = dev_get_drvdata(dev);
-	unsigned long flags;
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
+	guard(spinlock_irqsave)(&hba->clk_gating.lock);
 	hba->clk_gating.delay_ms = value;
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set);
 
@@ -2064,7 +2052,6 @@  static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct ufs_hba *hba = dev_get_drvdata(dev);
-	unsigned long flags;
 	u32 value;
 
 	if (kstrtou32(buf, 0, &value))
@@ -2072,9 +2059,10 @@  static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
 
 	value = !!value;
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
+	guard(spinlock_irqsave)(&hba->clk_gating.lock);
+
 	if (value == hba->clk_gating.is_enabled)
-		goto out;
+		return count;
 
 	if (value)
 		__ufshcd_release(hba);
@@ -2082,8 +2070,7 @@  static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
 		hba->clk_gating.active_reqs++;
 
 	hba->clk_gating.is_enabled = value;
-out:
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
 	return count;
 }
 
@@ -2125,6 +2112,8 @@  static void ufshcd_init_clk_gating(struct ufs_hba *hba)
 	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
 	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
 
+	spin_lock_init(&hba->clk_gating.lock);
+
 	hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(
 		"ufs_clk_gating_%d", WQ_MEM_RECLAIM | WQ_HIGHPRI,
 		hba->host->host_no);
@@ -9112,7 +9101,6 @@  static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
 	int ret = 0;
 	struct ufs_clk_info *clki;
 	struct list_head *head = &hba->clk_list_head;
-	unsigned long flags;
 	ktime_t start = ktime_get();
 	bool clk_state_changed = false;
 
@@ -9163,11 +9151,10 @@  static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
 				clk_disable_unprepare(clki->clk);
 		}
 	} else if (!ret && on) {
-		spin_lock_irqsave(hba->host->host_lock, flags);
-		hba->clk_gating.state = CLKS_ON;
+		scoped_guard(spinlock_irqsave, &hba->clk_gating.lock)
+			hba->clk_gating.state = CLKS_ON;
 		trace_ufshcd_clk_gating(dev_name(hba->dev),
 					hba->clk_gating.state);
-		spin_unlock_irqrestore(hba->host->host_lock, flags);
 	}
 
 	if (clk_state_changed)
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index d7aca9e61684..8f9997b0dbf9 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -403,6 +403,8 @@  enum clk_gating_state {
  * delay_ms
  * @ungate_work: worker to turn on clocks that will be used in case of
  * interrupt context
+ * @clk_gating_workq: workqueue for clock gating work.
+ * @lock: serialize access to some struct ufs_clk_gating members
  * @state: the current clocks state
  * @delay_ms: gating delay in ms
  * @is_suspended: clk gating is suspended when set to 1 which can be used
@@ -413,11 +415,14 @@  enum clk_gating_state {
  * @is_initialized: Indicates whether clock gating is initialized or not
  * @active_reqs: number of requests that are pending and should be waited for
  * completion before gating clocks.
- * @clk_gating_workq: workqueue for clock gating work.
  */
 struct ufs_clk_gating {
 	struct delayed_work gate_work;
 	struct work_struct ungate_work;
+	struct workqueue_struct *clk_gating_workq;
+
+	spinlock_t lock;
+
 	enum clk_gating_state state;
 	unsigned long delay_ms;
 	bool is_suspended;
@@ -426,7 +431,6 @@  struct ufs_clk_gating {
 	bool is_enabled;
 	bool is_initialized;
 	int active_reqs;
-	struct workqueue_struct *clk_gating_workq;
 };
 
 /**