@@ -1175,12 +1175,12 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
* clock scaling is in progress
*/
ufshcd_scsi_block_requests(hba);
- down_write(&hba->clk_scaling_lock);
+ ufshcd_down_write(hba);
if (!hba->clk_scaling.is_allowed ||
ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
ret = -EBUSY;
- up_write(&hba->clk_scaling_lock);
+ ufshcd_up_write(hba);
ufshcd_scsi_unblock_requests(hba);
goto out;
}
@@ -1195,9 +1195,9 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
{
if (writelock)
- up_write(&hba->clk_scaling_lock);
+ ufshcd_up_write(hba);
else
- up_read(&hba->clk_scaling_lock);
+ ufshcd_up_read(hba);
ufshcd_scsi_unblock_requests(hba);
ufshcd_release(hba);
}
@@ -1244,7 +1244,7 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
}
/* Enable Write Booster if we have scaled up else disable it */
- downgrade_write(&hba->clk_scaling_lock);
+ ufshcd_downgrade_write(hba);
is_writelock = false;
ufshcd_wb_toggle(hba, scale_up);
@@ -2681,7 +2681,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
- if (!down_read_trylock(&hba->clk_scaling_lock))
+ if (!ufshcd_down_read_trylock(hba))
return SCSI_MLQUEUE_HOST_BUSY;
switch (hba->ufshcd_state) {
@@ -2756,7 +2756,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
ufshcd_send_command(hba, tag);
out:
- up_read(&hba->clk_scaling_lock);
+ ufshcd_up_read(hba);
if (ufs_trigger_eh())
scsi_schedule_eh(hba->host);
@@ -2914,7 +2914,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
int err;
int tag;
- down_read(&hba->clk_scaling_lock);
+ ufshcd_down_read(hba);
/*
* Get free slot, sleep if slots are unavailable.
@@ -2950,7 +2950,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
out:
blk_put_request(req);
out_unlock:
- up_read(&hba->clk_scaling_lock);
+ ufshcd_up_read(hba);
return err;
}
@@ -5934,9 +5934,9 @@ static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
{
- down_write(&hba->clk_scaling_lock);
+ ufshcd_down_write(hba);
hba->clk_scaling.is_allowed = allow;
- up_write(&hba->clk_scaling_lock);
+ ufshcd_up_write(hba);
}
static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
@@ -5984,8 +5984,8 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
}
ufshcd_scsi_block_requests(hba);
/* Drain ufshcd_queuecommand() */
- down_write(&hba->clk_scaling_lock);
- up_write(&hba->clk_scaling_lock);
+ ufshcd_down_write(hba);
+ ufshcd_up_write(hba);
cancel_work_sync(&hba->eeh_work);
}
@@ -6196,7 +6196,7 @@ static void ufshcd_err_handler(struct Scsi_Host *host)
* Hold the scaling lock just in case dev cmds
* are sent via bsg and/or sysfs.
*/
- down_write(&hba->clk_scaling_lock);
+ ufshcd_down_write(hba);
hba->force_pmc = true;
pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
if (pmc_err) {
@@ -6206,7 +6206,7 @@ static void ufshcd_err_handler(struct Scsi_Host *host)
}
hba->force_pmc = false;
ufshcd_print_pwr_info(hba);
- up_write(&hba->clk_scaling_lock);
+ ufshcd_up_write(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
}
@@ -6705,7 +6705,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
int tag;
u8 upiu_flags;
- down_read(&hba->clk_scaling_lock);
+ ufshcd_down_read(hba);
req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
if (IS_ERR(req)) {
@@ -6790,7 +6790,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
out:
blk_put_request(req);
out_unlock:
- up_read(&hba->clk_scaling_lock);
+ ufshcd_up_read(hba);
return err;
}
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -1418,4 +1418,34 @@ static inline int ufshcd_rpmb_rpm_put(struct ufs_hba *hba)
return pm_runtime_put(&hba->sdev_rpmb->sdev_gendev);
}
+static inline void ufshcd_down_read(struct ufs_hba *hba)
+{
+ down_read(&hba->clk_scaling_lock);
+}
+
+static inline void ufshcd_up_read(struct ufs_hba *hba)
+{
+ up_read(&hba->clk_scaling_lock);
+}
+
+static inline int ufshcd_down_read_trylock(struct ufs_hba *hba)
+{
+ return down_read_trylock(&hba->clk_scaling_lock);
+}
+
+static inline void ufshcd_down_write(struct ufs_hba *hba)
+{
+ down_write(&hba->clk_scaling_lock);
+}
+
+static inline void ufshcd_up_write(struct ufs_hba *hba)
+{
+ up_write(&hba->clk_scaling_lock);
+}
+
+static inline void ufshcd_downgrade_write(struct ufs_hba *hba)
+{
+ downgrade_write(&hba->clk_scaling_lock);
+}
+
#endif /* End of Header */
In preparation for making clk_scaling_lock a more general purpose
sleeping lock for the host.

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
---
 drivers/scsi/ufs/ufshcd.c | 34 +++++++++++++++++-----------------
 drivers/scsi/ufs/ufshcd.h | 30 ++++++++++++++++++++++++++++++
 2 files changed, 47 insertions(+), 17 deletions(-)
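
Not part of the patch: for readers who want to see the accessor pattern in
isolation, below is a minimal user-space sketch of the same idea. All names
(example_hba, example_down_read(), and so on) are made up for illustration,
and pthread_rwlock_t stands in for the kernel rw_semaphore. The point, as in
ufshcd_down_read() and friends above, is that callers only ever go through
the wrappers, so the lock behind them can later be renamed, retyped, or given
broader scope in one place. pthreads has no equivalent of downgrade_write(),
so that wrapper has no analogue in the sketch. Build with: cc example.c -pthread

/* example.c - user-space sketch of wrapping a lock behind accessors */
#include <pthread.h>
#include <stdio.h>

struct example_hba {
	/* Today a clock-scaling lock; later possibly a general host lock. */
	pthread_rwlock_t clk_scaling_lock;
	int clk_scaling_allowed;
};

/* Accessors mirror the ufshcd_down_read()/ufshcd_up_write() style helpers. */
static inline void example_down_read(struct example_hba *hba)
{
	pthread_rwlock_rdlock(&hba->clk_scaling_lock);
}

static inline void example_up_read(struct example_hba *hba)
{
	pthread_rwlock_unlock(&hba->clk_scaling_lock);
}

static inline void example_down_write(struct example_hba *hba)
{
	pthread_rwlock_wrlock(&hba->clk_scaling_lock);
}

static inline void example_up_write(struct example_hba *hba)
{
	pthread_rwlock_unlock(&hba->clk_scaling_lock);
}

/* Callers never name the lock member directly. */
static void example_set_allowed(struct example_hba *hba, int allow)
{
	example_down_write(hba);
	hba->clk_scaling_allowed = allow;
	example_up_write(hba);
}

int main(void)
{
	struct example_hba hba = { .clk_scaling_allowed = 0 };

	pthread_rwlock_init(&hba.clk_scaling_lock, NULL);

	example_set_allowed(&hba, 1);

	example_down_read(&hba);
	printf("clk scaling allowed: %d\n", hba.clk_scaling_allowed);
	example_up_read(&hba);

	pthread_rwlock_destroy(&hba.clk_scaling_lock);
	return 0;
}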