Message ID | 20241027082519.576869-2-avri.altman@wdc.com (mailing list archive) |
---|---|
State | Superseded |
Series | Untie the host lock entanglement - part 2 |
On 10/27/24 1:25 AM, Avri Altman wrote:
> Introduce a new clock gating lock to seriliaze access to the clock
                                       ^^^^^^^^^
                                       serialize

> diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
> index 099373a25017..b7c7a7dd327f 100644
> --- a/drivers/ufs/core/ufshcd.c
> +++ b/drivers/ufs/core/ufshcd.c
> @@ -1817,13 +1817,13 @@ static void ufshcd_ungate_work(struct work_struct *work)
>
>  	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
>
> -	spin_lock_irqsave(hba->host->host_lock, flags);
> +	spin_lock_irqsave(&hba->clk_gating.lock, flags);
>  	if (hba->clk_gating.state == CLKS_ON) {
> -		spin_unlock_irqrestore(hba->host->host_lock, flags);
> +		spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
>  		return;
>  	}
>
> -	spin_unlock_irqrestore(hba->host->host_lock, flags);
> +	spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
>  	ufshcd_hba_vreg_set_hpm(hba);
>  	ufshcd_setup_clocks(hba, true);

This would be a great opportunity to replace the spinlock calls with
scoped_guard(), wouldn't it?

> @@ -1928,7 +1928,7 @@ static void ufshcd_gate_work(struct work_struct *work)
>  	unsigned long flags;
>  	int ret;
>
> -	spin_lock_irqsave(hba->host->host_lock, flags);
> +	spin_lock_irqsave(&hba->clk_gating.lock, flags);
>  	/*
>  	 * In case you are here to cancel this work the gating state
>  	 * would be marked as REQ_CLKS_ON. In this case save time by
> @@ -1946,7 +1946,7 @@ static void ufshcd_gate_work(struct work_struct *work)
>  	if (ufshcd_is_ufs_dev_busy(hba) || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
>  		goto rel_lock;
>
> -	spin_unlock_irqrestore(hba->host->host_lock, flags);
> +	spin_unlock_irqrestore(&hba->clk_gating.lock, flags);

Same comment here: please consider using scoped_guard().

>  	/* put the link into hibern8 mode before turning off clocks */
>  	if (ufshcd_can_hibern8_during_gating(hba)) {
> @@ -1977,14 +1977,14 @@ static void ufshcd_gate_work(struct work_struct *work)
>  	 * prevent from doing cancel work multiple times when there are
>  	 * new requests arriving before the current cancel work is done.
>  	 */
> -	spin_lock_irqsave(hba->host->host_lock, flags);
> +	spin_lock_irqsave(&hba->clk_gating.lock, flags);
>  	if (hba->clk_gating.state == REQ_CLKS_OFF) {
>  		hba->clk_gating.state = CLKS_OFF;
>  		trace_ufshcd_clk_gating(dev_name(hba->dev),
>  					hba->clk_gating.state);
>  	}
>  rel_lock:
> -	spin_unlock_irqrestore(hba->host->host_lock, flags);
> +	spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
>  out:
>  	return;
>  }

ufshcd_gate_work() can be simplified by using guard() and scoped_guard().

> @@ -2015,9 +2015,9 @@ void ufshcd_release(struct ufs_hba *hba)
>  {
>  	unsigned long flags;
>
> -	spin_lock_irqsave(hba->host->host_lock, flags);
> +	spin_lock_irqsave(&hba->clk_gating.lock, flags);
>  	__ufshcd_release(hba);
> -	spin_unlock_irqrestore(hba->host->host_lock, flags);
> +	spin_unlock_irqrestore(&hba->clk_gating.lock, flags);

For this function and also for later changes, please use guard().

> diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
> index 9ea2a7411bb5..52c822fe2944 100644
> --- a/include/ufs/ufshcd.h
> +++ b/include/ufs/ufshcd.h
> @@ -413,6 +413,7 @@ enum clk_gating_state {
>   * @active_reqs: number of requests that are pending and should be waited for
>   *	completion before gating clocks.
>   * @clk_gating_workq: workqueue for clock gating work.
> + * @lock: serielize access to the clk_gating members
           ^^^^^^^^^
           serialize

I don't think that the added comment is correct - 'lock' is used to
serialize access to some struct ufs_clk_gating members but not for
serializing access to all members. Accesses to e.g. gate_work,
ungate_work and clk_gating_workq are not serialized. Please reorder the
struct ufs_clk_gating members as follows:
- Members that are not serialized first.
- Next, 'lock'.
- Finally, the members serialized by 'lock'.

I think it is common in Linux kernel code that structure members are
organized this way.

Thanks,

Bart.
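[For readers outside the thread: guard() and scoped_guard() are the cleanup
helpers from <linux/cleanup.h>; the spinlock guard classes come in via
<linux/spinlock.h>. scoped_guard() acquires the lock on entry to the braced
block and releases it on every exit path, including early returns, so the
explicit unlock-before-return calls quoted above disappear, as does the
flags variable (the guard stores the saved interrupt state internally).
A minimal sketch of the kind of conversion being requested for
ufshcd_ungate_work(), assuming the clk_gating.lock introduced by this
patch - illustrative only, not necessarily the code that eventually landed:

static void ufshcd_ungate_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_gating.ungate_work);

	cancel_delayed_work_sync(&hba->clk_gating.gate_work);

	/*
	 * The lock is dropped automatically when the scope is left,
	 * including on the early return below; no flags variable and
	 * no explicit spin_unlock_irqrestore() needed.
	 */
	scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) {
		if (hba->clk_gating.state == CLKS_ON)
			return;
	}

	ufshcd_hba_vreg_set_hpm(hba);
	ufshcd_setup_clocks(hba, true);
	/* ... remainder of the function unchanged ... */
}]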
> On 10/27/24 1:25 AM, Avri Altman wrote:
> > Introduce a new clock gating lock to seriliaze access to the clock
>                                        ^^^^^^^^^
>                                        serialize

Done.

> > diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
> > index 099373a25017..b7c7a7dd327f 100644
> > --- a/drivers/ufs/core/ufshcd.c
> > +++ b/drivers/ufs/core/ufshcd.c
> > @@ -1817,13 +1817,13 @@ static void ufshcd_ungate_work(struct work_struct *work)
> >
> >  	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
> >
> > -	spin_lock_irqsave(hba->host->host_lock, flags);
> > +	spin_lock_irqsave(&hba->clk_gating.lock, flags);
> >  	if (hba->clk_gating.state == CLKS_ON) {
> > -		spin_unlock_irqrestore(hba->host->host_lock, flags);
> > +		spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
> >  		return;
> >  	}
> >
> > -	spin_unlock_irqrestore(hba->host->host_lock, flags);
> > +	spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
> >  	ufshcd_hba_vreg_set_hpm(hba);
> >  	ufshcd_setup_clocks(hba, true);
>
> This would be a great opportunity to replace the spinlock calls with
> scoped_guard(), wouldn't it?

Done.

> > @@ -1928,7 +1928,7 @@ static void ufshcd_gate_work(struct work_struct *work)
> >  	unsigned long flags;
> >  	int ret;
> >
> > -	spin_lock_irqsave(hba->host->host_lock, flags);
> > +	spin_lock_irqsave(&hba->clk_gating.lock, flags);
> >  	/*
> >  	 * In case you are here to cancel this work the gating state
> >  	 * would be marked as REQ_CLKS_ON. In this case save time by
> > @@ -1946,7 +1946,7 @@ static void ufshcd_gate_work(struct work_struct *work)
> >  	if (ufshcd_is_ufs_dev_busy(hba) || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
> >  		goto rel_lock;
> >
> > -	spin_unlock_irqrestore(hba->host->host_lock, flags);
> > +	spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
>
> Same comment here: please consider using scoped_guard().

Done.

> >  	/* put the link into hibern8 mode before turning off clocks */
> >  	if (ufshcd_can_hibern8_during_gating(hba)) {
> > @@ -1977,14 +1977,14 @@ static void ufshcd_gate_work(struct work_struct *work)
> >  	 * prevent from doing cancel work multiple times when there are
> >  	 * new requests arriving before the current cancel work is done.
> >  	 */
> > -	spin_lock_irqsave(hba->host->host_lock, flags);
> > +	spin_lock_irqsave(&hba->clk_gating.lock, flags);
> >  	if (hba->clk_gating.state == REQ_CLKS_OFF) {
> >  		hba->clk_gating.state = CLKS_OFF;
> >  		trace_ufshcd_clk_gating(dev_name(hba->dev),
> >  					hba->clk_gating.state);
> >  	}
> >  rel_lock:
> > -	spin_unlock_irqrestore(hba->host->host_lock, flags);
> > +	spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
> >  out:
> >  	return;
> >  }
>
> ufshcd_gate_work() can be simplified by using guard() and scoped_guard().

Done.

> > @@ -2015,9 +2015,9 @@ void ufshcd_release(struct ufs_hba *hba)
> >  {
> >  	unsigned long flags;
> >
> > -	spin_lock_irqsave(hba->host->host_lock, flags);
> > +	spin_lock_irqsave(&hba->clk_gating.lock, flags);
> >  	__ufshcd_release(hba);
> > -	spin_unlock_irqrestore(hba->host->host_lock, flags);
> > +	spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
>
> For this function and also for later changes, please use guard().

Done.

> > diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
> > index 9ea2a7411bb5..52c822fe2944 100644
> > --- a/include/ufs/ufshcd.h
> > +++ b/include/ufs/ufshcd.h
> > @@ -413,6 +413,7 @@ enum clk_gating_state {
> >   * @active_reqs: number of requests that are pending and should be waited for
> >   *	completion before gating clocks.
> >   * @clk_gating_workq: workqueue for clock gating work.
> > + * @lock: serielize access to the clk_gating members
>            ^^^^^^^^^
>            serialize
>
> I don't think that the added comment is correct - 'lock' is used to
> serialize access to some struct ufs_clk_gating members but not for
> serializing access to all members. Accesses to e.g. gate_work,
> ungate_work and clk_gating_workq are not serialized. Please reorder the
> struct ufs_clk_gating members as follows:
> - Members that are not serialized first.
> - Next, 'lock'.
> - Finally, the members serialized by 'lock'.
>
> I think it is common in Linux kernel code that structure members are
> organized this way.

Done.

Thanks,
Avri

> Thanks,
>
> Bart.
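[For reference, one possible layout that follows the ordering Bart asked
for. The member names are those of struct ufs_clk_gating at the time; the
grouping shown here is illustrative and is not a quote of the patch that
was eventually merged:

struct ufs_clk_gating {
	/* Not serialized by 'lock': */
	struct delayed_work gate_work;
	struct work_struct ungate_work;
	struct device_attribute delay_attr;
	struct device_attribute enable_attr;
	struct workqueue_struct *clk_gating_workq;

	/* Serializes access to the members below. */
	spinlock_t lock;

	enum clk_gating_state state;
	unsigned long delay_ms;
	bool is_suspended;
	bool is_enabled;
	bool is_initialized;
	int active_reqs;
};

Placing the lock between the two groups documents, at a glance, which
fields a reader must hold the lock to touch.]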
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 099373a25017..b7c7a7dd327f 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -1817,13 +1817,13 @@ static void ufshcd_ungate_work(struct work_struct *work)
 
 	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
+	spin_lock_irqsave(&hba->clk_gating.lock, flags);
 	if (hba->clk_gating.state == CLKS_ON) {
-		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
 		return;
 	}
 
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
 	ufshcd_hba_vreg_set_hpm(hba);
 	ufshcd_setup_clocks(hba, true);
@@ -1858,7 +1858,7 @@ void ufshcd_hold(struct ufs_hba *hba)
 	if (!ufshcd_is_clkgating_allowed(hba) ||
 	    !hba->clk_gating.is_initialized)
 		return;
-	spin_lock_irqsave(hba->host->host_lock, flags);
+	spin_lock_irqsave(&hba->clk_gating.lock, flags);
 	hba->clk_gating.active_reqs++;
 
 start:
@@ -1874,11 +1874,11 @@ void ufshcd_hold(struct ufs_hba *hba)
 		 */
 		if (ufshcd_can_hibern8_during_gating(hba) &&
 		    ufshcd_is_link_hibern8(hba)) {
-			spin_unlock_irqrestore(hba->host->host_lock, flags);
+			spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
 			flush_result = flush_work(&hba->clk_gating.ungate_work);
 			if (hba->clk_gating.is_suspended && !flush_result)
 				return;
-			spin_lock_irqsave(hba->host->host_lock, flags);
+			spin_lock_irqsave(&hba->clk_gating.lock, flags);
 			goto start;
 		}
 		break;
@@ -1907,17 +1907,17 @@ void ufshcd_hold(struct ufs_hba *hba)
 		 */
 		fallthrough;
 	case REQ_CLKS_ON:
-		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
 		flush_work(&hba->clk_gating.ungate_work);
 		/* Make sure state is CLKS_ON before returning */
-		spin_lock_irqsave(hba->host->host_lock, flags);
+		spin_lock_irqsave(&hba->clk_gating.lock, flags);
 		goto start;
 	default:
 		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
			__func__, hba->clk_gating.state);
 		break;
 	}
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
 }
 EXPORT_SYMBOL_GPL(ufshcd_hold);
@@ -1928,7 +1928,7 @@ static void ufshcd_gate_work(struct work_struct *work)
 	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
+	spin_lock_irqsave(&hba->clk_gating.lock, flags);
 	/*
 	 * In case you are here to cancel this work the gating state
 	 * would be marked as REQ_CLKS_ON. In this case save time by
@@ -1946,7 +1946,7 @@ static void ufshcd_gate_work(struct work_struct *work)
 	if (ufshcd_is_ufs_dev_busy(hba) || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
 		goto rel_lock;
 
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
 
 	/* put the link into hibern8 mode before turning off clocks */
 	if (ufshcd_can_hibern8_during_gating(hba)) {
@@ -1977,14 +1977,14 @@ static void ufshcd_gate_work(struct work_struct *work)
 	 * prevent from doing cancel work multiple times when there are
 	 * new requests arriving before the current cancel work is done.
 	 */
-	spin_lock_irqsave(hba->host->host_lock, flags);
+	spin_lock_irqsave(&hba->clk_gating.lock, flags);
 	if (hba->clk_gating.state == REQ_CLKS_OFF) {
 		hba->clk_gating.state = CLKS_OFF;
 		trace_ufshcd_clk_gating(dev_name(hba->dev),
 					hba->clk_gating.state);
 	}
 rel_lock:
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
 out:
 	return;
 }
@@ -2015,9 +2015,9 @@ void ufshcd_release(struct ufs_hba *hba)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
+	spin_lock_irqsave(&hba->clk_gating.lock, flags);
 	__ufshcd_release(hba);
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
 }
 EXPORT_SYMBOL_GPL(ufshcd_release);
@@ -2034,9 +2034,9 @@ void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value)
 	struct ufs_hba *hba = dev_get_drvdata(dev);
 	unsigned long flags;
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
+	spin_lock_irqsave(&hba->clk_gating.lock, flags);
 	hba->clk_gating.delay_ms = value;
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
 }
 EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set);
@@ -2072,7 +2072,7 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
 
 	value = !!value;
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
+	spin_lock_irqsave(&hba->clk_gating.lock, flags);
 	if (value == hba->clk_gating.is_enabled)
 		goto out;
@@ -2083,7 +2083,7 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
 	hba->clk_gating.is_enabled = value;
 out:
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
 
 	return count;
 }
@@ -2125,6 +2125,8 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
 	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
 	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
 
+	spin_lock_init(&hba->clk_gating.lock);
+
 	hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(
 		"ufs_clk_gating_%d", WQ_MEM_RECLAIM | WQ_HIGHPRI,
 		hba->host->host_no);
@@ -9173,11 +9175,11 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
 				clk_disable_unprepare(clki->clk);
 		}
 	} else if (!ret && on) {
-		spin_lock_irqsave(hba->host->host_lock, flags);
+		spin_lock_irqsave(&hba->clk_gating.lock, flags);
 		hba->clk_gating.state = CLKS_ON;
 		trace_ufshcd_clk_gating(dev_name(hba->dev),
 					hba->clk_gating.state);
-		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
 	}
 
 	if (clk_state_changed)
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index 9ea2a7411bb5..52c822fe2944 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -413,6 +413,7 @@ enum clk_gating_state {
  * @active_reqs: number of requests that are pending and should be waited for
  *	completion before gating clocks.
  * @clk_gating_workq: workqueue for clock gating work.
+ * @lock: serielize access to the clk_gating members
  */
 struct ufs_clk_gating {
 	struct delayed_work gate_work;
@@ -426,6 +427,7 @@ struct ufs_clk_gating {
 	bool is_initialized;
 	int active_reqs;
 	struct workqueue_struct *clk_gating_workq;
+	spinlock_t lock;
 };
 
 /**
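[A side note on the guard() request from the review above: for the simple
lock/unlock pairs in this diff, guard() binds the lock to the enclosing
function scope, so the flags variable and the explicit unlock call go away
entirely. A sketch of what that could look like for two of the functions
touched here, assuming the same clk_gating.lock - illustrative, not the
merged code:

void ufshcd_release(struct ufs_hba *hba)
{
	/* Held until the end of the function scope, dropped on return. */
	guard(spinlock_irqsave)(&hba->clk_gating.lock);
	__ufshcd_release(hba);
}
EXPORT_SYMBOL_GPL(ufshcd_release);

void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	guard(spinlock_irqsave)(&hba->clk_gating.lock);
	hba->clk_gating.delay_ms = value;
}
EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set);]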
Introduce a new clock gating lock to seriliaze access to the clock
gating members instead of the host_lock.

Signed-off-by: Avri Altman <avri.altman@wdc.com>
---
 drivers/ufs/core/ufshcd.c | 44 ++++++++++++++++++++-------------------
 include/ufs/ufshcd.h      |  2 ++
 2 files changed, 25 insertions(+), 21 deletions(-)