--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -278,17 +278,22 @@ static int ufshcd_mcq_get_tag(struct ufs_hba *hba, struct cq_entry *cqe)
return div_u64(addr, ufshcd_get_ucd_size(hba));
}
-static void ufshcd_mcq_process_cqe(struct ufs_hba *hba,
- struct ufs_hw_queue *hwq)
+/* Returns true if and only if @compl_cmd has been completed. */
+static bool ufshcd_mcq_process_cqe(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq,
+ struct scsi_cmnd *compl_cmd)
{
struct cq_entry *cqe = ufshcd_mcq_cur_cqe(hwq);
- int tag = ufshcd_mcq_get_tag(hba, cqe);
if (cqe->command_desc_base_addr) {
- ufshcd_compl_one_cqe(hba, tag, cqe);
- /* After processed the cqe, mark it empty (invalid) entry */
+ const int tag = ufshcd_mcq_get_tag(hba, cqe);
+
+ /* Mark the CQE as invalid. */
cqe->command_desc_base_addr = 0;
+
+ return ufshcd_compl_one_cqe(hba, tag, cqe, compl_cmd);
}
+ return false;
}
void ufshcd_mcq_compl_all_cqes_lock(struct ufs_hba *hba,
@@ -299,7 +304,7 @@ void ufshcd_mcq_compl_all_cqes_lock(struct ufs_hba *hba,
spin_lock_irqsave(&hwq->cq_lock, flags);
while (entries > 0) {
- ufshcd_mcq_process_cqe(hba, hwq);
+ ufshcd_mcq_process_cqe(hba, hwq, NULL);
ufshcd_mcq_inc_cq_head_slot(hwq);
entries--;
}
@@ -309,8 +314,10 @@ void ufshcd_mcq_compl_all_cqes_lock(struct ufs_hba *hba,
spin_unlock_irqrestore(&hwq->cq_lock, flags);
}
+/* Clears *@compl_cmd if and only if *@compl_cmd has been completed. */
unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
- struct ufs_hw_queue *hwq)
+ struct ufs_hw_queue *hwq,
+ struct scsi_cmnd **compl_cmd)
{
unsigned long completed_reqs = 0;
unsigned long flags;
@@ -318,7 +325,9 @@ unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
spin_lock_irqsave(&hwq->cq_lock, flags);
ufshcd_mcq_update_cq_tail_slot(hwq);
while (!ufshcd_mcq_is_cq_empty(hwq)) {
- ufshcd_mcq_process_cqe(hba, hwq);
+ if (ufshcd_mcq_process_cqe(hba, hwq,
+ compl_cmd ? *compl_cmd : NULL))
+ *compl_cmd = NULL;
ufshcd_mcq_inc_cq_head_slot(hwq);
completed_reqs++;
}
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -61,8 +61,8 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, u8 index, bool *flag_res);
void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
-void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
- struct cq_entry *cqe);
+bool ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
+ struct cq_entry *cqe, struct scsi_cmnd *compl_cmd);
int ufshcd_mcq_init(struct ufs_hba *hba);
int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba);
int ufshcd_mcq_memory_alloc(struct ufs_hba *hba);
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -5481,9 +5481,12 @@ void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
* @hba: per adapter instance
* @task_tag: the task tag of the request to be completed
* @cqe: pointer to the completion queue entry
+ * @compl_cmd: if not NULL, check whether this command has been completed
+ *
+ * Returns: true if and only if @compl_cmd has been completed.
*/
-void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
- struct cq_entry *cqe)
+bool ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
+ struct cq_entry *cqe, struct scsi_cmnd *compl_cmd)
{
struct ufshcd_lrb *lrbp;
struct scsi_cmnd *cmd;
@@ -5500,6 +5503,7 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
ufshcd_release_scsi_cmd(hba, lrbp);
/* Do not touch lrbp after scsi done */
scsi_done(cmd);
+ return cmd == compl_cmd;
} else if (hba->dev_cmd.complete) {
if (cqe) {
ocs = le32_to_cpu(cqe->status) & MASK_OCS;
@@ -5507,20 +5511,26 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
}
complete(hba->dev_cmd.complete);
}
+ return false;
}
/**
* __ufshcd_transfer_req_compl - handle SCSI and query command completion
* @hba: per adapter instance
* @completed_reqs: bitmask that indicates which requests to complete
+ * @compl_cmd: if not NULL, check whether *@compl_cmd has been completed.
+ * Clear *@compl_cmd if it has been completed.
*/
static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
- unsigned long completed_reqs)
+ unsigned long completed_reqs,
+ struct scsi_cmnd **compl_cmd)
{
int tag;
for_each_set_bit(tag, &completed_reqs, hba->nutrs)
- ufshcd_compl_one_cqe(hba, tag, NULL);
+ if (ufshcd_compl_one_cqe(hba, tag, NULL,
+ compl_cmd ? *compl_cmd : NULL))
+ *compl_cmd = NULL;
}
/* Any value that is not an existing queue number is fine for this constant. */
@@ -5547,7 +5557,8 @@ static void ufshcd_clear_polled(struct ufs_hba *hba,
* Return: > 0 if one or more commands have been completed or 0 if no
* requests have been completed.
*/
-static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
+static int __ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num,
+ struct scsi_cmnd **compl_cmd)
{
struct ufs_hba *hba = shost_priv(shost);
unsigned long completed_reqs, flags;
@@ -5558,7 +5569,7 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
WARN_ON_ONCE(queue_num == UFSHCD_POLL_FROM_INTERRUPT_CONTEXT);
hwq = &hba->uhq[queue_num];
- return ufshcd_mcq_poll_cqe_lock(hba, hwq);
+ return ufshcd_mcq_poll_cqe_lock(hba, hwq, compl_cmd);
}
spin_lock_irqsave(&hba->outstanding_lock, flags);
@@ -5575,11 +5586,16 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
spin_unlock_irqrestore(&hba->outstanding_lock, flags);
if (completed_reqs)
- __ufshcd_transfer_req_compl(hba, completed_reqs);
+ __ufshcd_transfer_req_compl(hba, completed_reqs, compl_cmd);
return completed_reqs != 0;
}
+static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
+{
+ return __ufshcd_poll(shost, queue_num, NULL);
+}
+
/**
* ufshcd_mcq_compl_pending_transfer - MCQ mode function. It is
* invoked from the error handler context or ufshcd_host_reset_and_restore()
@@ -5623,7 +5639,7 @@ static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
}
spin_unlock_irqrestore(&hwq->cq_lock, flags);
} else {
- ufshcd_mcq_poll_cqe_lock(hba, hwq);
+ ufshcd_mcq_poll_cqe_lock(hba, hwq, NULL);
}
}
}
@@ -6898,7 +6914,7 @@ static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba)
ufshcd_mcq_write_cqis(hba, events, i);
if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
- ufshcd_mcq_poll_cqe_lock(hba, hwq);
+ ufshcd_mcq_poll_cqe_lock(hba, hwq, NULL);
}
return IRQ_HANDLED;
@@ -7391,7 +7407,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
lrbp->lun == lun) {
ufshcd_clear_cmd(hba, pos);
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
- ufshcd_mcq_poll_cqe_lock(hba, hwq);
+ ufshcd_mcq_poll_cqe_lock(hba, hwq, NULL);
}
}
err = 0;
@@ -7419,7 +7435,8 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
__func__, pos);
}
}
- __ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared_mask);
+ __ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared_mask,
+ NULL);
out:
hba->req_abort_count = 0;
@@ -7596,7 +7613,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
dev_err(hba->dev,
"%s: cmd was completed, but without a notifying intr, tag = %d",
__func__, tag);
- __ufshcd_transfer_req_compl(hba, 1UL << tag);
+ __ufshcd_transfer_req_compl(hba, 1UL << tag, NULL);
goto release;
}
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -1715,7 +1715,7 @@ static irqreturn_t ufs_mtk_mcq_intr(int irq, void *__intr_info)
ufshcd_mcq_write_cqis(hba, events, qid);
if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
- ufshcd_mcq_poll_cqe_lock(hba, hwq);
+ ufshcd_mcq_poll_cqe_lock(hba, hwq, NULL);
return IRQ_HANDLED;
}
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -1709,7 +1709,7 @@ static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *data)
struct ufs_hw_queue *hwq = &hba->uhq[id];
ufshcd_mcq_write_cqis(hba, 0x1, id);
- ufshcd_mcq_poll_cqe_lock(hba, hwq);
+ ufshcd_mcq_poll_cqe_lock(hba, hwq, NULL);
return IRQ_HANDLED;
}
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -1266,7 +1266,8 @@ void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds);
u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i);
void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
- struct ufs_hw_queue *hwq);
+ struct ufs_hw_queue *hwq,
+ struct scsi_cmnd **compl_cmd);
void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba);
void ufshcd_mcq_enable_esi(struct ufs_hba *hba);
void ufshcd_mcq_enable(struct ufs_hba *hba);
Prepare for introducing a new __ufshcd_poll() caller that will need to
know whether or not a specific command has been completed.

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
---
 drivers/ufs/core/ufs-mcq.c      | 25 +++++++++++++-------
 drivers/ufs/core/ufshcd-priv.h  |  4 ++--
 drivers/ufs/core/ufshcd.c       | 41 +++++++++++++++++++++++----------
 drivers/ufs/host/ufs-mediatek.c |  2 +-
 drivers/ufs/host/ufs-qcom.c     |  2 +-
 include/ufs/ufshcd.h            |  3 ++-
 6 files changed, 52 insertions(+), 25 deletions(-)
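Not part of the patch itself, but as a rough sketch of how a future __ufshcd_poll() caller could use the new compl_cmd argument: the helper name ufshcd_poll_for_cmd() below is made up for illustration and is assumed to sit next to __ufshcd_poll() in drivers/ufs/core/ufshcd.c. It polls one queue and reports whether the command of interest was among the reaped completions, relying on __ufshcd_poll() clearing *compl_cmd if and only if that command has been completed.

/* Hypothetical caller, for illustration only; not part of this patch. */
static bool ufshcd_poll_for_cmd(struct Scsi_Host *shost, unsigned int queue_num,
				struct scsi_cmnd *cmd)
{
	/* __ufshcd_poll() clears 'pending' if and only if 'cmd' completed. */
	struct scsi_cmnd *pending = cmd;

	__ufshcd_poll(shost, queue_num, &pending);

	return !pending;
}

The double-pointer interface lets a single poll call both reap any number of completions and answer the narrower question of whether one particular command completed, without an extra lookup afterwards.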