Message ID | 1500630584-22852-9-git-send-email-adrian.hunter@intel.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
On 21 July 2017 at 11:49, Adrian Hunter <adrian.hunter@intel.com> wrote: > Enhance mmc_blk_data_prep() to support CQE requests. Please try to be a bit more descriptive. > > Signed-off-by: Adrian Hunter <adrian.hunter@intel.com> > --- > drivers/mmc/core/block.c | 45 ++++++++++++++++++++++++++++++++++----------- > 1 file changed, 34 insertions(+), 11 deletions(-) > > diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c > index 40f0d596ed54..915290c74363 100644 > --- a/drivers/mmc/core/block.c > +++ b/drivers/mmc/core/block.c > @@ -36,6 +36,7 @@ > #include <linux/compat.h> > #include <linux/pm_runtime.h> > #include <linux/idr.h> > +#include <linux/ioprio.h> > > #include <linux/mmc/ioctl.h> > #include <linux/mmc/card.h> > @@ -1485,25 +1486,27 @@ static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card, > } > > static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, > - int disable_multi, bool *do_rel_wr, > - bool *do_data_tag) > + int disable_multi, bool *do_rel_wr_p, > + bool *do_data_tag_p) > { > struct mmc_blk_data *md = mq->blkdata; > struct mmc_card *card = md->queue.card; > struct mmc_blk_request *brq = &mqrq->brq; > struct request *req = mmc_queue_req_to_req(mqrq); > + bool do_rel_wr, do_data_tag; > > /* > * Reliable writes are used to implement Forced Unit Access and > * are supported only on MMCs. 
> */ > - *do_rel_wr = (req->cmd_flags & REQ_FUA) && > - rq_data_dir(req) == WRITE && > - (md->flags & MMC_BLK_REL_WR); > + do_rel_wr = (req->cmd_flags & REQ_FUA) && > + rq_data_dir(req) == WRITE && > + (md->flags & MMC_BLK_REL_WR); > > memset(brq, 0, sizeof(struct mmc_blk_request)); > > brq->mrq.data = &brq->data; > + brq->mrq.tag = req->tag; > > brq->stop.opcode = MMC_STOP_TRANSMISSION; > brq->stop.arg = 0; > @@ -1518,6 +1521,15 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, > > brq->data.blksz = 512; > brq->data.blocks = blk_rq_sectors(req); > + brq->data.blk_addr = blk_rq_pos(req); > + > + /* > + * The command queue supports 2 priorities: "high" (1) and "simple" (0). > + * The eMMC will give "high" priority tasks priority over "simple" > + * priority tasks. Here we give priority to IOPRIO_CLASS_RT. > + */ > + if (IOPRIO_PRIO_CLASS(req_get_ioprio(req)) == IOPRIO_CLASS_RT) > + brq->data.flags |= MMC_DATA_PRIO; As I said earlier. I suggest you move the things related to priority last in the series. Then we can discuss them separately, not blocking anything else. 
> > /* > * The block layer doesn't support all sector count > @@ -1547,18 +1559,23 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, > brq->data.blocks); > } > > - if (*do_rel_wr) > + if (do_rel_wr) { > mmc_apply_rel_rw(brq, card, req); > + brq->data.flags |= MMC_DATA_REL_WR; > + } > > /* > * Data tag is used only during writing meta data to speed > * up write and any subsequent read of this meta data > */ > - *do_data_tag = card->ext_csd.data_tag_unit_size && > - (req->cmd_flags & REQ_META) && > - (rq_data_dir(req) == WRITE) && > - ((brq->data.blocks * brq->data.blksz) >= > - card->ext_csd.data_tag_unit_size); > + do_data_tag = card->ext_csd.data_tag_unit_size && > + (req->cmd_flags & REQ_META) && > + (rq_data_dir(req) == WRITE) && > + ((brq->data.blocks * brq->data.blksz) >= > + card->ext_csd.data_tag_unit_size); > + > + if (do_data_tag) > + brq->data.flags |= MMC_DATA_DAT_TAG; > > mmc_set_data_timeout(&brq->data, card); > > @@ -1587,6 +1604,12 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, > mqrq->areq.mrq = &brq->mrq; > > mmc_queue_bounce_pre(mqrq); > + > + if (do_rel_wr_p) I guess you don't need to make this conditional, let's just do the below assignment. > + *do_rel_wr_p = do_rel_wr; > + > + if (do_data_tag_p) Ditto. > + *do_data_tag_p = do_data_tag; > } > > static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, > -- > 1.9.1 > I like the clarification this change introduces, in assigning the two out parameters the values of the two local variables. However, could you perhaps make that change a separate patch, which should come prior to the $subject patch in the series. That would make it more evident which changes are really for CQE. Kind regards Uffe -- To unsubscribe from this list: send the line "unsubscribe linux-mmc" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 40f0d596ed54..915290c74363 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -36,6 +36,7 @@ #include <linux/compat.h> #include <linux/pm_runtime.h> #include <linux/idr.h> +#include <linux/ioprio.h> #include <linux/mmc/ioctl.h> #include <linux/mmc/card.h> @@ -1485,25 +1486,27 @@ static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card, } static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, - int disable_multi, bool *do_rel_wr, - bool *do_data_tag) + int disable_multi, bool *do_rel_wr_p, + bool *do_data_tag_p) { struct mmc_blk_data *md = mq->blkdata; struct mmc_card *card = md->queue.card; struct mmc_blk_request *brq = &mqrq->brq; struct request *req = mmc_queue_req_to_req(mqrq); + bool do_rel_wr, do_data_tag; /* * Reliable writes are used to implement Forced Unit Access and * are supported only on MMCs. */ - *do_rel_wr = (req->cmd_flags & REQ_FUA) && - rq_data_dir(req) == WRITE && - (md->flags & MMC_BLK_REL_WR); + do_rel_wr = (req->cmd_flags & REQ_FUA) && + rq_data_dir(req) == WRITE && + (md->flags & MMC_BLK_REL_WR); memset(brq, 0, sizeof(struct mmc_blk_request)); brq->mrq.data = &brq->data; + brq->mrq.tag = req->tag; brq->stop.opcode = MMC_STOP_TRANSMISSION; brq->stop.arg = 0; @@ -1518,6 +1521,15 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, brq->data.blksz = 512; brq->data.blocks = blk_rq_sectors(req); + brq->data.blk_addr = blk_rq_pos(req); + + /* + * The command queue supports 2 priorities: "high" (1) and "simple" (0). + * The eMMC will give "high" priority tasks priority over "simple" + * priority tasks. Here we give priority to IOPRIO_CLASS_RT. 
+ */ + if (IOPRIO_PRIO_CLASS(req_get_ioprio(req)) == IOPRIO_CLASS_RT) + brq->data.flags |= MMC_DATA_PRIO; /* * The block layer doesn't support all sector count @@ -1547,18 +1559,23 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, brq->data.blocks); } - if (*do_rel_wr) + if (do_rel_wr) { mmc_apply_rel_rw(brq, card, req); + brq->data.flags |= MMC_DATA_REL_WR; + } /* * Data tag is used only during writing meta data to speed * up write and any subsequent read of this meta data */ - *do_data_tag = card->ext_csd.data_tag_unit_size && - (req->cmd_flags & REQ_META) && - (rq_data_dir(req) == WRITE) && - ((brq->data.blocks * brq->data.blksz) >= - card->ext_csd.data_tag_unit_size); + do_data_tag = card->ext_csd.data_tag_unit_size && + (req->cmd_flags & REQ_META) && + (rq_data_dir(req) == WRITE) && + ((brq->data.blocks * brq->data.blksz) >= + card->ext_csd.data_tag_unit_size); + + if (do_data_tag) + brq->data.flags |= MMC_DATA_DAT_TAG; mmc_set_data_timeout(&brq->data, card); @@ -1587,6 +1604,12 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, mqrq->areq.mrq = &brq->mrq; mmc_queue_bounce_pre(mqrq); + + if (do_rel_wr_p) + *do_rel_wr_p = do_rel_wr; + + if (do_data_tag_p) + *do_data_tag_p = do_data_tag; } static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
Enhance mmc_blk_data_prep() to support CQE requests. Signed-off-by: Adrian Hunter <adrian.hunter@intel.com> --- drivers/mmc/core/block.c | 45 ++++++++++++++++++++++++++++++++++----------- 1 file changed, 34 insertions(+), 11 deletions(-)