@@ -340,9 +340,9 @@ static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
struct mmc_blk_data *md = mq->data;

/*
- No-op, only service this because we need REQ_FUA
- for reliable writes.
- */
+ * No-op, only service this because we need REQ_FUA for reliable
+ * writes.
+ */
spin_lock_irq(&md->lock);
__blk_end_request_all(req, 0);
spin_unlock_irq(&md->lock);
@@ -364,16 +364,14 @@ static inline int mmc_apply_rel_rw(struct mmc_blk_request *brq,
int err;
struct mmc_command set_count;

- if (!(card->ext_csd.rel_param &
- EXT_CSD_WR_REL_PARAM_EN)) {
-
+ if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
/* Legacy mode imposes restrictions on transfers. */
if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
brq->data.blocks = 1;

if (brq->data.blocks > card->ext_csd.rel_sectors)
brq->data.blocks = card->ext_csd.rel_sectors;
- else if (brq->data.blocks != card->ext_csd.rel_sectors)
+ else if (brq->data.blocks < card->ext_csd.rel_sectors)
brq->data.blocks = 1;
}

@@ -396,8 +394,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
int ret = 1, disable_multi = 0;

/*
- Reliable writes are used to implement Forced Unit Access and
- REQ_META accesses, and it's supported only on MMCs.
+ * Reliable writes are used to implement Forced Unit Access and
+ * REQ_META accesses, and are supported only on MMCs.
*/
bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
(req->cmd_flags & REQ_META)) &&
@@ -464,10 +462,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
brq.data.flags |= MMC_DATA_WRITE;
}

- if (do_rel_wr) {
- if (mmc_apply_rel_rw(&brq, card, req))
- goto cmd_err;
- }
+ if (do_rel_wr && mmc_apply_rel_rw(&brq, card, req))
+ goto cmd_err;

mmc_set_data_timeout(&brq.data, card);