[RFC,1/2] remove field use_cqe in mmc_queue

Message ID: 20210215003217.GA12240@lupo-laptop
State: New, archived
Series: Support temporarily disable of the CMDQ mode

Commit Message

Luca Porzio Feb. 15, 2021, 12:32 a.m. UTC
Remove the use_cqe field from struct mmc_queue and use the
more appropriate mmc_host->cqe_enabled instead.

Signed-off-by: Luca Porzio <lporzio@micron.com>
Signed-off-by: Zhan Liu <zliua@micron.com>
---
 drivers/mmc/core/block.c |  7 ++++---
 drivers/mmc/core/queue.c | 11 +++++------
 drivers/mmc/core/queue.h |  1 -
 3 files changed, 9 insertions(+), 10 deletions(-)
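
[Editorial illustration, not part of the patch: a minimal stand-alone C model of the cleanup. It is hypothetical user-space code, not kernel code; the real kernel reaches the host via mq->card->host, which the model flattens into a single host pointer, and the helper mq_uses_cqe() is invented here to stand in for the patch's open-coded checks. The point is that readers consult the authoritative mmc_host field instead of a per-queue copy cached at init time.]

/*
 * Model of the change: instead of caching host->cqe_enabled in a
 * per-queue use_cqe copy when the queue is created, every reader
 * consults the host field directly, so there is a single source of
 * truth that a later patch can toggle at runtime.
 */
#include <stdbool.h>
#include <stdio.h>

struct mmc_host  { bool cqe_enabled; };
struct mmc_queue { struct mmc_host *host; /* bool use_cqe; -- removed */ };

/* hypothetical helper standing in for the open-coded checks in the patch */
static bool mq_uses_cqe(const struct mmc_queue *mq)
{
	return mq->host->cqe_enabled;
}

int main(void)
{
	struct mmc_host host = { .cqe_enabled = true };
	struct mmc_queue mq = { .host = &host };

	printf("CQE in use: %d\n", mq_uses_cqe(&mq));	/* prints 1 */
	host.cqe_enabled = false;	/* a runtime toggle is now visible */
	printf("CQE in use: %d\n", mq_uses_cqe(&mq));	/* prints 0 */
	return 0;
}

With a cached per-queue copy, the second read would still report 1; avoiding that staleness is presumably what the series' support for temporarily disabling the CMDQ mode relies on.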

Comments

Ulf Hansson March 9, 2021, 9:01 a.m. UTC | #1
On Mon, 15 Feb 2021 at 01:33, Luca Porzio <porzio@gmail.com> wrote:
>
> Remove the use_cqe field from struct mmc_queue and use the
> more appropriate mmc_host->cqe_enabled instead.
>
> Signed-off-by: Luca Porzio <lporzio@micron.com>
> Signed-off-by: Zhan Liu <zliua@micron.com>

This looks like a nice standalone cleanup. So, applied for next (I
took the liberty of updating the commit message), thanks!

Kind regards
Uffe


Patch

diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index b877f62df366..08b3c4c4b9f6 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1933,8 +1933,9 @@ static void mmc_blk_hsq_req_done(struct mmc_request *mrq)
 void mmc_blk_mq_complete(struct request *req)
 {
 	struct mmc_queue *mq = req->q->queuedata;
+	struct mmc_host *host = mq->card->host;
 
-	if (mq->use_cqe)
+	if (host->cqe_enabled)
 		mmc_blk_cqe_complete_rq(mq, req);
 	else if (likely(!blk_should_fake_timeout(req->q)))
 		mmc_blk_mq_complete_rq(mq, req);
@@ -2179,7 +2180,7 @@ static int mmc_blk_mq_issue_rw_rq(struct mmc_queue *mq,
 
 static int mmc_blk_wait_for_idle(struct mmc_queue *mq, struct mmc_host *host)
 {
-	if (mq->use_cqe)
+	if (host->cqe_enabled)
 		return host->cqe_ops->cqe_wait_for_idle(host);
 
 	return mmc_blk_rw_wait(mq, NULL);
@@ -2228,7 +2229,7 @@ enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
 			break;
 		case REQ_OP_READ:
 		case REQ_OP_WRITE:
-			if (mq->use_cqe)
+			if (host->cqe_enabled)
 				ret = mmc_blk_cqe_issue_rw_rq(mq, req);
 			else
 				ret = mmc_blk_mq_issue_rw_rq(mq, req);
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 27d2b8ed9484..d600e0a4a460 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -60,7 +60,7 @@ enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
 {
 	struct mmc_host *host = mq->card->host;
 
-	if (mq->use_cqe && !host->hsq_enabled)
+	if (host->cqe_enabled && !host->hsq_enabled)
 		return mmc_cqe_issue_type(host, req);
 
 	if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
@@ -127,7 +127,7 @@ static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
 	bool ignore_tout;
 
 	spin_lock_irqsave(&mq->lock, flags);
-	ignore_tout = mq->recovery_needed || !mq->use_cqe || host->hsq_enabled;
+	ignore_tout = mq->recovery_needed || !host->cqe_enabled || host->hsq_enabled;
 	spin_unlock_irqrestore(&mq->lock, flags);
 
 	return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req);
@@ -144,7 +144,7 @@ static void mmc_mq_recovery_handler(struct work_struct *work)
 
 	mq->in_recovery = true;
 
-	if (mq->use_cqe && !host->hsq_enabled)
+	if (host->cqe_enabled && !host->hsq_enabled)
 		mmc_blk_cqe_recovery(mq);
 	else
 		mmc_blk_mq_recovery(mq);
@@ -315,7 +315,7 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (get_card)
 		mmc_get_card(card, &mq->ctx);
 
-	if (mq->use_cqe) {
+	if (host->cqe_enabled) {
 		host->retune_now = host->need_retune && cqe_retune_ok &&
 				   !host->hold_retune;
 	}
@@ -430,7 +430,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
 	int ret;
 
 	mq->card = card;
-	mq->use_cqe = host->cqe_enabled;

 	spin_lock_init(&mq->lock);
 
@@ -440,7 +439,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
 	 * The queue depth for CQE must match the hardware because the request
 	 * tag is used to index the hardware queue.
 	 */
-	if (mq->use_cqe && !host->hsq_enabled)
+	if (host->cqe_enabled && !host->hsq_enabled)
 		mq->tag_set.queue_depth =
 			min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
 	else
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
index 57c59b6cb1b9..3319d8ab57d0 100644
--- a/drivers/mmc/core/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -82,7 +82,6 @@ struct mmc_queue {
 	unsigned int		cqe_busy;
 #define MMC_CQE_DCMD_BUSY	BIT(0)
 	bool			busy;
-	bool			use_cqe;
 	bool			recovery_needed;
 	bool			in_recovery;
 	bool			rw_wait;
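
[Editorial note on the queue-depth hunk in mmc_init_queue() above: per the comment in the patch, the blk-mq request tag is used directly to index the hardware queue when CQE is used (and HSQ is not), so the depth must be clamped to what both the card and the controller support. Below is a hypothetical stand-alone sketch of that clamp; the depth values are made-up examples standing in for card->ext_csd.cmdq_depth and host->cqe_qdepth, and min_int() stands in for the kernel's min_t().]

#include <stdio.h>

/* stand-in for the kernel's min_t(int, a, b) */
static int min_int(int a, int b)
{
	return a < b ? a : b;
}

int main(void)
{
	int cmdq_depth = 32;	/* example: queue depth advertised by the card's EXT_CSD */
	int cqe_qdepth = 16;	/* example: queue depth supported by the host controller */

	/* mirrors: mq->tag_set.queue_depth = min_t(int, cmdq_depth, cqe_qdepth); */
	printf("tag_set.queue_depth = %d\n", min_int(cmdq_depth, cqe_qdepth));
	return 0;
}

Here the resulting depth is 16: a tag larger than the controller's queue could never be used as a hardware queue index, regardless of what the card advertises.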